[llvm] d63d662 - [RISCV] Remove --riscv-no-aliases from RVV tests

Jessica Clarke via llvm-commits llvm-commits at lists.llvm.org
Wed May 26 10:00:50 PDT 2021


Author: Jessica Clarke
Date: 2021-05-26T17:59:38+01:00
New Revision: d63d662d3cc51219fb08908ebea8d5851e53adb8

URL: https://github.com/llvm/llvm-project/commit/d63d662d3cc51219fb08908ebea8d5851e53adb8
DIFF: https://github.com/llvm/llvm-project/commit/d63d662d3cc51219fb08908ebea8d5851e53adb8.diff

LOG: [RISCV] Remove --riscv-no-aliases from RVV tests

This serves no useful purpose other than to clutter things up. A diff
summary is given below, as the real diff is extremely unwieldy:

   24844 -; CHECK-NEXT:    jalr zero, 0(ra)
   24844 +; CHECK-NEXT:    ret
       8 -; CHECK-NEXT:    vl4re8.v v28, (a0)
       8 +; CHECK-NEXT:    vl4r.v v28, (a0)
      64 -; CHECK-NEXT:    vl8re8.v v24, (a0)
      64 +; CHECK-NEXT:    vl8r.v v24, (a0)
     392 -; RUN:   --riscv-no-aliases < %s | FileCheck %s
     392 +; RUN:   < %s | FileCheck %s
       1 -; RUN:   -verify-machineinstrs --riscv-no-aliases < %s \
       1 +; RUN:   -verify-machineinstrs < %s \

As discussed in D103004.
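
For reference, here is a condensed, self-contained sketch of the first test
case in vaadd-rv32.ll as it reads after this change, assembled from the hunks
below. The full define signature and the i32 vector-length operand are filled
in for readability; they are not shown verbatim in the truncated hunk context:

    ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
    ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
    ; RUN:   < %s | FileCheck %s

    declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
      <vscale x 1 x i8>,
      <vscale x 1 x i8>,
      i32)

    define <vscale x 1 x i8> @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
    ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8:
    ; CHECK:       # %bb.0: # %entry
    ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
    ; CHECK-NEXT:    vaadd.vv v8, v8, v9
    ; CHECK-NEXT:    ret
    entry:
      %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
        <vscale x 1 x i8> %0,
        <vscale x 1 x i8> %1,
        i32 %2)
      ret <vscale x 1 x i8> %a
    }

Note that in the summary above only the whole-register loads of e8 elements
pick up a shorter spelling (vl4re8.v -> vl4r.v, vl8re8.v -> vl8r.v); the
vl8re16.v/vl8re32.v/vl8re64.v forms elsewhere in the diff have no such alias
and are printed unchanged.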

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamoadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamoadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamoand-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamoand-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamomax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamomax-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamomin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamomin-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamominu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamominu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamoor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamoor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamoswap-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamoswap-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamoxor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vle1-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vle1-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmerge-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmerge-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vpopc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll
    llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
index bafdd31396a7..aad8f23d7efe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vaadd.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vaadd.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vaadd.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
index b6af9496bf10..981d1bd3a63f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
index 0e3b7798fc5a..8dfdfa71b466 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsc
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
index 31fcfcfae329..4644d3139abf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsc
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
index b65e5bd99e18..06ef1289b369 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 2 x i8> @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -56,7 +56,7 @@ define <vscale x 4 x i8> @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -78,7 +78,7 @@ define <vscale x 8 x i8> @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -100,7 +100,7 @@ define <vscale x 16 x i8> @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -122,7 +122,7 @@ define <vscale x 32 x i8> @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -144,7 +144,7 @@ define <vscale x 64 x i8> @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -166,7 +166,7 @@ define <vscale x 1 x i16> @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -188,7 +188,7 @@ define <vscale x 2 x i16> @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -210,7 +210,7 @@ define <vscale x 4 x i16> @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -232,7 +232,7 @@ define <vscale x 8 x i16> @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -254,7 +254,7 @@ define <vscale x 16 x i16> @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -276,7 +276,7 @@ define <vscale x 32 x i16> @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x i32> @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x i32> @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -342,7 +342,7 @@ define <vscale x 4 x i32> @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -364,7 +364,7 @@ define <vscale x 8 x i32> @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -386,7 +386,7 @@ define <vscale x 16 x i32> @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -408,7 +408,7 @@ define <vscale x 1 x i64> @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -430,7 +430,7 @@ define <vscale x 2 x i64> @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -452,7 +452,7 @@ define <vscale x 4 x i64> @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i64> @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -496,7 +496,7 @@ define <vscale x 1 x i8> @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -518,7 +518,7 @@ define <vscale x 2 x i8> @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -540,7 +540,7 @@ define <vscale x 4 x i8> @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -562,7 +562,7 @@ define <vscale x 8 x i8> @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -584,7 +584,7 @@ define <vscale x 16 x i8> @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -606,7 +606,7 @@ define <vscale x 32 x i8> @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -628,7 +628,7 @@ define <vscale x 64 x i8> @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -650,7 +650,7 @@ define <vscale x 1 x i16> @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -672,7 +672,7 @@ define <vscale x 2 x i16> @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -694,7 +694,7 @@ define <vscale x 4 x i16> @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -716,7 +716,7 @@ define <vscale x 8 x i16> @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -738,7 +738,7 @@ define <vscale x 16 x i16> @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -760,7 +760,7 @@ define <vscale x 32 x i16> @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -782,7 +782,7 @@ define <vscale x 1 x i32> @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -804,7 +804,7 @@ define <vscale x 2 x i32> @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -826,7 +826,7 @@ define <vscale x 4 x i32> @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -848,7 +848,7 @@ define <vscale x 8 x i32> @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -870,7 +870,7 @@ define <vscale x 16 x i32> @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -898,7 +898,7 @@ define <vscale x 1 x i64> @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vadc.vvm v8, v8, v25, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -926,7 +926,7 @@ define <vscale x 2 x i64> @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vadc.vvm v8, v8, v26, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -954,7 +954,7 @@ define <vscale x 4 x i64> @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vadc.vvm v8, v8, v28, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -982,7 +982,7 @@ define <vscale x 8 x i64> @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -998,7 +998,7 @@ define <vscale x 1 x i8> @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1014,7 +1014,7 @@ define <vscale x 2 x i8> @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1030,7 +1030,7 @@ define <vscale x 4 x i8> @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1046,7 +1046,7 @@ define <vscale x 8 x i8> @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1062,7 +1062,7 @@ define <vscale x 16 x i8> @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1078,7 +1078,7 @@ define <vscale x 32 x i8> @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1094,7 +1094,7 @@ define <vscale x 64 x i8> @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1110,7 +1110,7 @@ define <vscale x 1 x i16> @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1126,7 +1126,7 @@ define <vscale x 2 x i16> @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1142,7 +1142,7 @@ define <vscale x 4 x i16> @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 8 x i16> @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1174,7 +1174,7 @@ define <vscale x 16 x i16> @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1190,7 +1190,7 @@ define <vscale x 32 x i16> @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1206,7 +1206,7 @@ define <vscale x 1 x i32> @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1222,7 +1222,7 @@ define <vscale x 2 x i32> @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1238,7 +1238,7 @@ define <vscale x 4 x i32> @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 8 x i32> @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1270,7 +1270,7 @@ define <vscale x 16 x i32> @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1286,7 +1286,7 @@ define <vscale x 1 x i64> @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1302,7 +1302,7 @@ define <vscale x 2 x i64> @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1318,7 +1318,7 @@ define <vscale x 4 x i64> @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1334,7 +1334,7 @@ define <vscale x 8 x i64> @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
index ac991d5536f1..3d7756afaecb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 2 x i8> @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -56,7 +56,7 @@ define <vscale x 4 x i8> @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -78,7 +78,7 @@ define <vscale x 8 x i8> @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -100,7 +100,7 @@ define <vscale x 16 x i8> @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -122,7 +122,7 @@ define <vscale x 32 x i8> @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -144,7 +144,7 @@ define <vscale x 64 x i8> @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -166,7 +166,7 @@ define <vscale x 1 x i16> @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -188,7 +188,7 @@ define <vscale x 2 x i16> @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -210,7 +210,7 @@ define <vscale x 4 x i16> @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -232,7 +232,7 @@ define <vscale x 8 x i16> @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -254,7 +254,7 @@ define <vscale x 16 x i16> @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -276,7 +276,7 @@ define <vscale x 32 x i16> @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x i32> @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x i32> @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -342,7 +342,7 @@ define <vscale x 4 x i32> @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -364,7 +364,7 @@ define <vscale x 8 x i32> @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -386,7 +386,7 @@ define <vscale x 16 x i32> @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -408,7 +408,7 @@ define <vscale x 1 x i64> @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -430,7 +430,7 @@ define <vscale x 2 x i64> @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -452,7 +452,7 @@ define <vscale x 4 x i64> @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i64> @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -496,7 +496,7 @@ define <vscale x 1 x i8> @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -518,7 +518,7 @@ define <vscale x 2 x i8> @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -540,7 +540,7 @@ define <vscale x 4 x i8> @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -562,7 +562,7 @@ define <vscale x 8 x i8> @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -584,7 +584,7 @@ define <vscale x 16 x i8> @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -606,7 +606,7 @@ define <vscale x 32 x i8> @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -628,7 +628,7 @@ define <vscale x 64 x i8> @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -650,7 +650,7 @@ define <vscale x 1 x i16> @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -672,7 +672,7 @@ define <vscale x 2 x i16> @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -694,7 +694,7 @@ define <vscale x 4 x i16> @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -716,7 +716,7 @@ define <vscale x 8 x i16> @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -738,7 +738,7 @@ define <vscale x 16 x i16> @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -760,7 +760,7 @@ define <vscale x 32 x i16> @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -782,7 +782,7 @@ define <vscale x 1 x i32> @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -804,7 +804,7 @@ define <vscale x 2 x i32> @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -826,7 +826,7 @@ define <vscale x 4 x i32> @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -848,7 +848,7 @@ define <vscale x 8 x i32> @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -870,7 +870,7 @@ define <vscale x 16 x i32> @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -892,7 +892,7 @@ define <vscale x 1 x i64> @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -914,7 +914,7 @@ define <vscale x 2 x i64> @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -936,7 +936,7 @@ define <vscale x 4 x i64> @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -958,7 +958,7 @@ define <vscale x 8 x i64> @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x i8> @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -990,7 +990,7 @@ define <vscale x 2 x i8> @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1006,7 +1006,7 @@ define <vscale x 4 x i8> @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1022,7 +1022,7 @@ define <vscale x 8 x i8> @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1038,7 +1038,7 @@ define <vscale x 16 x i8> @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1054,7 +1054,7 @@ define <vscale x 32 x i8> @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 64 x i8> @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1086,7 +1086,7 @@ define <vscale x 1 x i16> @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1102,7 +1102,7 @@ define <vscale x 2 x i16> @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1118,7 +1118,7 @@ define <vscale x 4 x i16> @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1134,7 +1134,7 @@ define <vscale x 8 x i16> @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1150,7 +1150,7 @@ define <vscale x 16 x i16> @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1166,7 +1166,7 @@ define <vscale x 32 x i16> @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i32> @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1198,7 +1198,7 @@ define <vscale x 2 x i32> @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1214,7 +1214,7 @@ define <vscale x 4 x i32> @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1230,7 +1230,7 @@ define <vscale x 8 x i32> @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1246,7 +1246,7 @@ define <vscale x 16 x i32> @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1262,7 +1262,7 @@ define <vscale x 1 x i64> @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1278,7 +1278,7 @@ define <vscale x 2 x i64> @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i64> @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1310,7 +1310,7 @@ define <vscale x 8 x i64> @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
index 12a47b640142..187b27983647 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vadd.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vadd.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vadd.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1998,7 +1998,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2013,7 +2013,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2030,7 +2030,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2045,7 +2045,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2062,7 +2062,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2077,7 +2077,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2094,7 +2094,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2109,7 +2109,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2126,7 +2126,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2141,7 +2141,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2158,7 +2158,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2173,7 +2173,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2190,7 +2190,7 @@ define <vscale x 64 x i8> @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2205,7 +2205,7 @@ define <vscale x 64 x i8> @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2222,7 +2222,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2237,7 +2237,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2254,7 +2254,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2269,7 +2269,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2286,7 +2286,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2301,7 +2301,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2318,7 +2318,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2333,7 +2333,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2350,7 +2350,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2365,7 +2365,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2382,7 +2382,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2397,7 +2397,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2414,7 +2414,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2429,7 +2429,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2446,7 +2446,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2461,7 +2461,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2478,7 +2478,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2493,7 +2493,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2510,7 +2510,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2525,7 +2525,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2542,7 +2542,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2557,7 +2557,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2574,7 +2574,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2589,7 +2589,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2606,7 +2606,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2621,7 +2621,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2638,7 +2638,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2653,7 +2653,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2670,7 +2670,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2685,7 +2685,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
index 1bedcca63b64..b3c6f06872cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 1 x i8> @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 2 x i8> @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 4 x i8> @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 8 x i8> @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 16 x i8> @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 32 x i8> @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 64 x i8> @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 64 x i8> @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i16> @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i16> @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i16> @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i16> @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i16> @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@ define <vscale x 32 x i16> @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@ define <vscale x 1 x i32> @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@ define <vscale x 2 x i32> @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@ define <vscale x 4 x i32> @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@ define <vscale x 8 x i32> @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@ define <vscale x 16 x i32> @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@ define <vscale x 1 x i64> @intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@ define <vscale x 2 x i64> @intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@ define <vscale x 4 x i64> @intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 8 x i64> @intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
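
The two spellings swapped throughout these hunks encode the same instruction:
"ret" is the standard RISC-V assembler alias for the canonical "jalr" form, and
llc only prints the canonical spelling when --riscv-no-aliases is passed. A
minimal sketch of the equivalence, not part of the committed diff:

    ret                  # alias printed by default; return to the caller
    jalr zero, 0(ra)     # canonical form: jump to the address in ra,
                         # rd = zero discards the link value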

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv32.ll
index 87fa74d12cf8..723ef8be3252 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i64(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i64(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i64(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i64(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i64(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i64(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i64(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i64(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i32(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i32(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i32(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i32(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@ define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@ define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i32(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i32(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@ define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i32(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@ define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@ define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i32(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@ define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@ define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i16(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i16(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i16(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i16(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i16(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i16(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i16(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i16(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i8(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i8(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i8(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i8(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv64.ll
index c36bd2b32c7f..2998e8b777e6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i64(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i64(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i64(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i64(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i64(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i64(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i64(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i64(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i32(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i32(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i32(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i32(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@ define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@ define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i32(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i32(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@ define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i32(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@ define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@ define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i32(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@ define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@ define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i16(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i16(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i16(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i16(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i16(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i16(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i16(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i16(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i8(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i8(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i8(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i8(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoand-rv32.ll
index 01013474656c..313d205f8e3b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoand-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i64(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i64(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i64(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i64(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i64(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i64(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i64(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i64(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i32(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i32(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i32(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i32(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@ define <vscale x 16 x i32> @intrinsic_vamoand_v_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @intrinsic_vamoand_mask_v_nxv16i32_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@ define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i32(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i32(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@ define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i32(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@ define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@ define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i32(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@ define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@ define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i16(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i16(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i16(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i16(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i32> @intrinsic_vamoand_v_nxv16i32_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 16 x i32> @intrinsic_vamoand_mask_v_nxv16i32_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i16(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i16(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i16(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i16(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i8(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 16 x i32> @intrinsic_vamoand_v_nxv16i32_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 16 x i32> @intrinsic_vamoand_mask_v_nxv16i32_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i8(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i8(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i8(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vamoand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoand-rv64.ll
index 6b8046cc1193..2af4dd712ee4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoand-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i64(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i64(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i64(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i64(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i64(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i64(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i64(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i64(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i32(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i32(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i32(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i32(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@ define <vscale x 16 x i32> @intrinsic_vamoand_v_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @intrinsic_vamoand_mask_v_nxv16i32_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@ define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i32(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i32(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@ define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i32(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@ define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@ define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i32(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@ define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@ define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i16(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i16(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i16(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i16(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i32> @intrinsic_vamoand_v_nxv16i32_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 16 x i32> @intrinsic_vamoand_mask_v_nxv16i32_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i16(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i16(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i16(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i16(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i8(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 16 x i32> @intrinsic_vamoand_v_nxv16i32_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 16 x i32> @intrinsic_vamoand_mask_v_nxv16i32_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i8(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i8(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i8(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vamomax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamomax-rv32.ll
index 9d7be0c80360..418b19f3f093 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamomax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamomax-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i64(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i64(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i64(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i64(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i64(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i64(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i64(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i64(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i32(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i32(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i32(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i32(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@ define <vscale x 16 x i32> @intrinsic_vamomax_v_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @intrinsic_vamomax_mask_v_nxv16i32_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@ define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i32(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i32(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@ define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i32(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@ define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@ define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i32(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@ define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@ define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i16(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i16(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i16(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i16(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i32> @intrinsic_vamomax_v_nxv16i32_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 16 x i32> @intrinsic_vamomax_mask_v_nxv16i32_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i16(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i16(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i16(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i16(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i8(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 16 x i32> @intrinsic_vamomax_v_nxv16i32_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 16 x i32> @intrinsic_vamomax_mask_v_nxv16i32_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i8(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i8(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i8(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vamomax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamomax-rv64.ll
index a8e6f95e602f..d00eff0ddc6c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamomax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamomax-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i64(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i64(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i64(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i64(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i64(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i64(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i64(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i64(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i32(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i32(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i32(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i32(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@ define <vscale x 16 x i32> @intrinsic_vamomax_v_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @intrinsic_vamomax_mask_v_nxv16i32_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@ define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i32(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i32(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@ define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i32(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@ define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@ define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i32(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@ define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@ define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i16(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i16(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i16(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i16(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i32> @intrinsic_vamomax_v_nxv16i32_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 16 x i32> @intrinsic_vamomax_mask_v_nxv16i32_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i16(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i16(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i16(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i16(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i8(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 16 x i32> @intrinsic_vamomax_v_nxv16i32_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 16 x i32> @intrinsic_vamomax_mask_v_nxv16i32_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i8(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i8(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i8(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll
index 3a0d1302f9d8..dba4340e7012 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i64(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i64(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i64(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i64(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i64(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i64(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i64(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i64(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i32(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i32(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i32(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i32(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@ define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i32(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@ define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i32(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i32(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@ define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i32(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@ define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@ define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i32(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@ define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@ define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i16(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i16(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i16(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i16(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i16(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i16(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i16(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i16(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i16(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i8(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i8(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i8(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i8(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i8(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i8(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i8(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i8(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv64.ll
index cc23e7dca324..99af9fc884eb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i64(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i64(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i64(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i64(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i64(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i64(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i64(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i64(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i32(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i32(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i32(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i32(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@ define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i32(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@ define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i32(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i32(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@ define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i32(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@ define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@ define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i32(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@ define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@ define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i16(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i16(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i16(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i16(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i16(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i16(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i16(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i16(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i16(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i8(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i8(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i8(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i8(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i8(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i8(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i8(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i8(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamomin-rv32.ll
index cff18fb9bb95..fc4478a5aecd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamomin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamomin-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i64(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i64(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i64(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i64(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i64(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i64(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i64(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i64(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i32(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i32(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i32(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i32(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@ define <vscale x 16 x i32> @intrinsic_vamomin_v_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @intrinsic_vamomin_mask_v_nxv16i32_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@ define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i32(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i32(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@ define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i32(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@ define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@ define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i32(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@ define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@ define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i16(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i16(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i16(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i16(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i32> @intrinsic_vamomin_v_nxv16i32_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 16 x i32> @intrinsic_vamomin_mask_v_nxv16i32_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i16(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i16(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i16(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i16(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i8(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 16 x i32> @intrinsic_vamomin_v_nxv16i32_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 16 x i32> @intrinsic_vamomin_mask_v_nxv16i32_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i8(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i8(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i8(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamomin-rv64.ll
index 03c9311d9879..86652af4d541 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamomin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamomin-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i64(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i64(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i64(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i64(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i64(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i64(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i64(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i64(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i32(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i32(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i32(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i32(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@ define <vscale x 16 x i32> @intrinsic_vamomin_v_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @intrinsic_vamomin_mask_v_nxv16i32_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@ define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i32(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i32(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@ define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i32(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@ define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@ define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i32(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@ define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@ define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i16(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i16(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i16(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i16(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i32> @intrinsic_vamomin_v_nxv16i32_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 16 x i32> @intrinsic_vamomin_mask_v_nxv16i32_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i16(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i16(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i16(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i16(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i8(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 16 x i32> @intrinsic_vamomin_v_nxv16i32_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 16 x i32> @intrinsic_vamomin_mask_v_nxv16i32_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i8(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i8(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i8(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamominu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamominu-rv32.ll
index 593730442a6f..f4ec68e24e84 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamominu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamominu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i64(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i64(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i64(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i64(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i64(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i64(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i64(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i64(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i32(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i32(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i32(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i32(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@ define <vscale x 16 x i32> @intrinsic_vamominu_v_nxv16i32_nxv16i32(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @intrinsic_vamominu_mask_v_nxv16i32_nxv16i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@ define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i32(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i32(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@ define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i32(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@ define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@ define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i32(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@ define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@ define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i16(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i16(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i16(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i16(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i32> @intrinsic_vamominu_v_nxv16i32_nxv16i16(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 16 x i32> @intrinsic_vamominu_mask_v_nxv16i32_nxv16i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i16(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i16(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i16(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i16(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i8(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i8(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i8(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i8(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 16 x i32> @intrinsic_vamominu_v_nxv16i32_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 16 x i32> @intrinsic_vamominu_mask_v_nxv16i32_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i8(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i8(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i8(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i8(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamominu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamominu-rv64.ll
index 16083d6d4fd4..41e176316061 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamominu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamominu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i64(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i64(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i64(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i64(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i64(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i64(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i64(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i64(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i32(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i32(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i32(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i32(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@ define <vscale x 16 x i32> @intrinsic_vamominu_v_nxv16i32_nxv16i32(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @intrinsic_vamominu_mask_v_nxv16i32_nxv16i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@ define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i32(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i32(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@ define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i32(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@ define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@ define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i32(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@ define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@ define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i16(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i16(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i16(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i16(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i32> @intrinsic_vamominu_v_nxv16i32_nxv16i16(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 16 x i32> @intrinsic_vamominu_mask_v_nxv16i32_nxv16i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i16(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i16(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i16(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i16(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i8(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i8(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i8(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i8(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 16 x i32> @intrinsic_vamominu_v_nxv16i32_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 16 x i32> @intrinsic_vamominu_mask_v_nxv16i32_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i8(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i8(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i8(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i8(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoor-rv32.ll
index 4cfdbb0cf16f..4065a9125bca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i64(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i64(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i64(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@ define <vscale x 16 x i32> @intrinsic_vamoor_v_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @intrinsic_vamoor_mask_v_nxv16i32_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@ define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i32(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@ define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i32(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@ define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@ define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i32(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@ define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@ define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i16(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i16(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i32> @intrinsic_vamoor_v_nxv16i32_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 16 x i32> @intrinsic_vamoor_mask_v_nxv16i32_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i16(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i16(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i16(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i8(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 16 x i32> @intrinsic_vamoor_v_nxv16i32_nxv16i8(<vscale x 16 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 16 x i32> @intrinsic_vamoor_mask_v_nxv16i32_nxv16i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i8(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i8(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i8(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoor-rv64.ll
index e12c2e28c71c..c4711cc12d9f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i64(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i64(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i64(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@ define <vscale x 16 x i32> @intrinsic_vamoor_v_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @intrinsic_vamoor_mask_v_nxv16i32_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@ define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i32(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@ define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i32(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@ define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@ define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i32(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@ define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@ define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i16(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i16(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i32> @intrinsic_vamoor_v_nxv16i32_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 16 x i32> @intrinsic_vamoor_mask_v_nxv16i32_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i16(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i16(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i16(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i8(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 16 x i32> @intrinsic_vamoor_v_nxv16i32_nxv16i8(<vscale x 16 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 16 x i32> @intrinsic_vamoor_mask_v_nxv16i32_nxv16i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i8(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i8(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i8(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv32.ll
index 6e25c1479734..92dd36187681 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i64(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i64(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i64(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i64(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i64(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i64(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i64(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i64(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64(
     <vscale x 1 x float> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64(
     <vscale x 2 x float> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64(
     <vscale x 4 x float> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64(
     <vscale x 8 x float> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> *%0,
@@ -589,7 +589,7 @@ define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64(
     <vscale x 1 x double> *%0,
@@ -613,7 +613,7 @@ define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> *%0,
@@ -637,7 +637,7 @@ define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64(
     <vscale x 2 x double> *%0,
@@ -661,7 +661,7 @@ define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> *%0,
@@ -685,7 +685,7 @@ define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64(
     <vscale x 4 x double> *%0,
@@ -709,7 +709,7 @@ define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> *%0,
@@ -733,7 +733,7 @@ define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64(
     <vscale x 8 x double> *%0,
@@ -757,7 +757,7 @@ define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> *%0,
@@ -781,7 +781,7 @@ define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i32(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -805,7 +805,7 @@ define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -829,7 +829,7 @@ define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i32(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i32(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i32(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 16 x i32> @intrinsic_vamoswap_v_nxv16i32_nxv16i32(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i32(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i32(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i32(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i32(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32(
     <vscale x 1 x float> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32(
     <vscale x 2 x float> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32(
     <vscale x 4 x float> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32(
     <vscale x 8 x float> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 16 x float> @intrinsic_vamoswap_v_nxv16f32_nxv16i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32(
     <vscale x 16 x float> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 16 x float> @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i32(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32(
     <vscale x 1 x double> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32(
     <vscale x 2 x double> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32(
     <vscale x 4 x double> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32(
     <vscale x 8 x double> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i16(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -1693,7 +1693,7 @@ define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i16(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -1717,7 +1717,7 @@ define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -1741,7 +1741,7 @@ define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i16(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -1765,7 +1765,7 @@ define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -1789,7 +1789,7 @@ define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i16(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1813,7 +1813,7 @@ define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1837,7 +1837,7 @@ define <vscale x 16 x i32> @intrinsic_vamoswap_v_nxv16i32_nxv16i16(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1861,7 +1861,7 @@ define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1885,7 +1885,7 @@ define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i16(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1909,7 +1909,7 @@ define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1933,7 +1933,7 @@ define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i16(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1957,7 +1957,7 @@ define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1981,7 +1981,7 @@ define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i16(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -2005,7 +2005,7 @@ define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -2029,7 +2029,7 @@ define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i16(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -2053,7 +2053,7 @@ define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -2077,7 +2077,7 @@ define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16(
     <vscale x 1 x float> *%0,
@@ -2101,7 +2101,7 @@ define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> *%0,
@@ -2125,7 +2125,7 @@ define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16(
     <vscale x 2 x float> *%0,
@@ -2149,7 +2149,7 @@ define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> *%0,
@@ -2173,7 +2173,7 @@ define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16(
     <vscale x 4 x float> *%0,
@@ -2197,7 +2197,7 @@ define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> *%0,
@@ -2221,7 +2221,7 @@ define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16(
     <vscale x 8 x float> *%0,
@@ -2245,7 +2245,7 @@ define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> *%0,
@@ -2269,7 +2269,7 @@ define <vscale x 16 x float> @intrinsic_vamoswap_v_nxv16f32_nxv16i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16(
     <vscale x 16 x float> *%0,
@@ -2293,7 +2293,7 @@ define <vscale x 16 x float> @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i16(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> *%0,
@@ -2317,7 +2317,7 @@ define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16(
     <vscale x 1 x double> *%0,
@@ -2341,7 +2341,7 @@ define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> *%0,
@@ -2365,7 +2365,7 @@ define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16(
     <vscale x 2 x double> *%0,
@@ -2389,7 +2389,7 @@ define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> *%0,
@@ -2413,7 +2413,7 @@ define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16(
     <vscale x 4 x double> *%0,
@@ -2437,7 +2437,7 @@ define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> *%0,
@@ -2461,7 +2461,7 @@ define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16(
     <vscale x 8 x double> *%0,
@@ -2485,7 +2485,7 @@ define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> *%0,
@@ -2509,7 +2509,7 @@ define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i8(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -2533,7 +2533,7 @@ define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -2557,7 +2557,7 @@ define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i8(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -2581,7 +2581,7 @@ define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -2605,7 +2605,7 @@ define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i8(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -2629,7 +2629,7 @@ define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -2653,7 +2653,7 @@ define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i8(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -2677,7 +2677,7 @@ define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -2701,7 +2701,7 @@ define <vscale x 16 x i32> @intrinsic_vamoswap_v_nxv16i32_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -2725,7 +2725,7 @@ define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -2749,7 +2749,7 @@ define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i8(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -2773,7 +2773,7 @@ define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -2797,7 +2797,7 @@ define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i8(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -2821,7 +2821,7 @@ define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -2845,7 +2845,7 @@ define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i8(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -2869,7 +2869,7 @@ define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -2893,7 +2893,7 @@ define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i8(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -2917,7 +2917,7 @@ define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -2941,7 +2941,7 @@ define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i8(<vscale x 1 x f
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8(
     <vscale x 1 x float> *%0,
@@ -2965,7 +2965,7 @@ define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> *%0,
@@ -2989,7 +2989,7 @@ define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i8(<vscale x 2 x f
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8(
     <vscale x 2 x float> *%0,
@@ -3013,7 +3013,7 @@ define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> *%0,
@@ -3037,7 +3037,7 @@ define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i8(<vscale x 4 x f
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8(
     <vscale x 4 x float> *%0,
@@ -3061,7 +3061,7 @@ define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> *%0,
@@ -3085,7 +3085,7 @@ define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i8(<vscale x 8 x f
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8(
     <vscale x 8 x float> *%0,
@@ -3109,7 +3109,7 @@ define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> *%0,
@@ -3133,7 +3133,7 @@ define <vscale x 16 x float> @intrinsic_vamoswap_v_nxv16f32_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8(
     <vscale x 16 x float> *%0,
@@ -3157,7 +3157,7 @@ define <vscale x 16 x float> @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> *%0,
@@ -3181,7 +3181,7 @@ define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8(
     <vscale x 1 x double> *%0,
@@ -3205,7 +3205,7 @@ define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> *%0,
@@ -3229,7 +3229,7 @@ define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8(
     <vscale x 2 x double> *%0,
@@ -3253,7 +3253,7 @@ define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> *%0,
@@ -3277,7 +3277,7 @@ define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8(
     <vscale x 4 x double> *%0,
@@ -3301,7 +3301,7 @@ define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> *%0,
@@ -3325,7 +3325,7 @@ define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8(
     <vscale x 8 x double> *%0,
@@ -3349,7 +3349,7 @@ define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> *%0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv64.ll
index 043d80d483de..52989f317bf8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i64(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i64(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i64(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i64(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i64(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i64(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i64(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i64(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64(
     <vscale x 1 x float> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64(
     <vscale x 2 x float> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64(
     <vscale x 4 x float> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64(
     <vscale x 8 x float> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> *%0,
@@ -589,7 +589,7 @@ define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64(
     <vscale x 1 x double> *%0,
@@ -613,7 +613,7 @@ define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> *%0,
@@ -637,7 +637,7 @@ define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64(
     <vscale x 2 x double> *%0,
@@ -661,7 +661,7 @@ define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> *%0,
@@ -685,7 +685,7 @@ define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64(
     <vscale x 4 x double> *%0,
@@ -709,7 +709,7 @@ define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> *%0,
@@ -733,7 +733,7 @@ define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64(
     <vscale x 8 x double> *%0,
@@ -757,7 +757,7 @@ define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> *%0,
@@ -781,7 +781,7 @@ define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i32(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -805,7 +805,7 @@ define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -829,7 +829,7 @@ define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i32(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i32(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i32(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 16 x i32> @intrinsic_vamoswap_v_nxv16i32_nxv16i32(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i32(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i32(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i32(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i32(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32(
     <vscale x 1 x float> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32(
     <vscale x 2 x float> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32(
     <vscale x 4 x float> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32(
     <vscale x 8 x float> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 16 x float> @intrinsic_vamoswap_v_nxv16f32_nxv16i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32(
     <vscale x 16 x float> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 16 x float> @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i32(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32(
     <vscale x 1 x double> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32(
     <vscale x 2 x double> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32(
     <vscale x 4 x double> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32(
     <vscale x 8 x double> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i16(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -1693,7 +1693,7 @@ define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i16(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -1717,7 +1717,7 @@ define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -1741,7 +1741,7 @@ define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i16(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -1765,7 +1765,7 @@ define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -1789,7 +1789,7 @@ define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i16(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1813,7 +1813,7 @@ define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1837,7 +1837,7 @@ define <vscale x 16 x i32> @intrinsic_vamoswap_v_nxv16i32_nxv16i16(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1861,7 +1861,7 @@ define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1885,7 +1885,7 @@ define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i16(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1909,7 +1909,7 @@ define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1933,7 +1933,7 @@ define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i16(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1957,7 +1957,7 @@ define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1981,7 +1981,7 @@ define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i16(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -2005,7 +2005,7 @@ define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -2029,7 +2029,7 @@ define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i16(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -2053,7 +2053,7 @@ define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -2077,7 +2077,7 @@ define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16(
     <vscale x 1 x float> *%0,
@@ -2101,7 +2101,7 @@ define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> *%0,
@@ -2125,7 +2125,7 @@ define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16(
     <vscale x 2 x float> *%0,
@@ -2149,7 +2149,7 @@ define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> *%0,
@@ -2173,7 +2173,7 @@ define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16(
     <vscale x 4 x float> *%0,
@@ -2197,7 +2197,7 @@ define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> *%0,
@@ -2221,7 +2221,7 @@ define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16(
     <vscale x 8 x float> *%0,
@@ -2245,7 +2245,7 @@ define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> *%0,
@@ -2269,7 +2269,7 @@ define <vscale x 16 x float> @intrinsic_vamoswap_v_nxv16f32_nxv16i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16(
     <vscale x 16 x float> *%0,
@@ -2293,7 +2293,7 @@ define <vscale x 16 x float> @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i16(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> *%0,
@@ -2317,7 +2317,7 @@ define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16(
     <vscale x 1 x double> *%0,
@@ -2341,7 +2341,7 @@ define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> *%0,
@@ -2365,7 +2365,7 @@ define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16(
     <vscale x 2 x double> *%0,
@@ -2389,7 +2389,7 @@ define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> *%0,
@@ -2413,7 +2413,7 @@ define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16(
     <vscale x 4 x double> *%0,
@@ -2437,7 +2437,7 @@ define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> *%0,
@@ -2461,7 +2461,7 @@ define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16(
     <vscale x 8 x double> *%0,
@@ -2485,7 +2485,7 @@ define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> *%0,
@@ -2509,7 +2509,7 @@ define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i8(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -2533,7 +2533,7 @@ define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -2557,7 +2557,7 @@ define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i8(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -2581,7 +2581,7 @@ define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -2605,7 +2605,7 @@ define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i8(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -2629,7 +2629,7 @@ define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -2653,7 +2653,7 @@ define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i8(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -2677,7 +2677,7 @@ define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -2701,7 +2701,7 @@ define <vscale x 16 x i32> @intrinsic_vamoswap_v_nxv16i32_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -2725,7 +2725,7 @@ define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -2749,7 +2749,7 @@ define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i8(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -2773,7 +2773,7 @@ define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -2797,7 +2797,7 @@ define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i8(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -2821,7 +2821,7 @@ define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -2845,7 +2845,7 @@ define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i8(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -2869,7 +2869,7 @@ define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -2893,7 +2893,7 @@ define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i8(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -2917,7 +2917,7 @@ define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -2941,7 +2941,7 @@ define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i8(<vscale x 1 x f
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8(
     <vscale x 1 x float> *%0,
@@ -2965,7 +2965,7 @@ define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> *%0,
@@ -2989,7 +2989,7 @@ define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i8(<vscale x 2 x f
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8(
     <vscale x 2 x float> *%0,
@@ -3013,7 +3013,7 @@ define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> *%0,
@@ -3037,7 +3037,7 @@ define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i8(<vscale x 4 x f
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8(
     <vscale x 4 x float> *%0,
@@ -3061,7 +3061,7 @@ define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> *%0,
@@ -3085,7 +3085,7 @@ define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i8(<vscale x 8 x f
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8(
     <vscale x 8 x float> *%0,
@@ -3109,7 +3109,7 @@ define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> *%0,
@@ -3133,7 +3133,7 @@ define <vscale x 16 x float> @intrinsic_vamoswap_v_nxv16f32_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8(
     <vscale x 16 x float> *%0,
@@ -3157,7 +3157,7 @@ define <vscale x 16 x float> @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> *%0,
@@ -3181,7 +3181,7 @@ define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8(
     <vscale x 1 x double> *%0,
@@ -3205,7 +3205,7 @@ define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> *%0,
@@ -3229,7 +3229,7 @@ define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8(
     <vscale x 2 x double> *%0,
@@ -3253,7 +3253,7 @@ define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> *%0,
@@ -3277,7 +3277,7 @@ define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8(
     <vscale x 4 x double> *%0,
@@ -3301,7 +3301,7 @@ define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> *%0,
@@ -3325,7 +3325,7 @@ define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8(
     <vscale x 8 x double> *%0,
@@ -3349,7 +3349,7 @@ define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> *%0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv32.ll
index fe48785030a5..5061ea509b66 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i64(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i64(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i64(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i64(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i64(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i64(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i64(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i64(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i32(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i32(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i32(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i32(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@ define <vscale x 16 x i32> @intrinsic_vamoxor_v_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@ define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i32(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i32(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@ define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i32(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@ define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@ define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i32(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@ define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@ define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i16(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i16(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i16(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i16(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i32> @intrinsic_vamoxor_v_nxv16i32_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 16 x i32> @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i16(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i16(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i16(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i16(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i8(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 16 x i32> @intrinsic_vamoxor_v_nxv16i32_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 16 x i32> @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i8(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i8(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i8(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll
index ce250b0487fc..f732b98c40d6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i64(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i64(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@ define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@ define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i64(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@ define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@ define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i64(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@ define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@ define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i64(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@ define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@ define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i64(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@ define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i64(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i64(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@ define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@ define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i32(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@ define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@ define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i32(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@ define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@ define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i32(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@ define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@ define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i32(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@ define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@ define <vscale x 16 x i32> @intrinsic_vamoxor_v_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@ define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i32(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i32(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@ define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@ define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i32(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@ define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@ define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i32(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@ define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@ define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i16(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@ define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i16(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i16(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i16(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i32> @intrinsic_vamoxor_v_nxv16i32_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@ define <vscale x 16 x i32> @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i16(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@ define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@ define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i16(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@ define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i16(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@ define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i16(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@ define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@ define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@ define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@ define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@ define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@ define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@ define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i8(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@ define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@ define <vscale x 16 x i32> @intrinsic_vamoxor_v_nxv16i32_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@ define <vscale x 16 x i32> @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@ define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@ define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i8(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@ define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@ define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i8(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@ define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i8(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,

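For reference, the CHECK-line updates in these files are purely a change of spelling: without --riscv-no-aliases, llc prints the standard assembler aliases, e.g. ret for jalr zero, 0(ra) and vl8r.v for vl8re8.v in the vand tests below, with identical encodings. A minimal sketch (illustrative file list, assuming a built llc is on PATH) of regenerating the assertions with the script named in the NOTE lines:

  python3 llvm/utils/update_llc_test_checks.py \
      llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll \
      llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
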
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
index f0ebddf9583b..40272c1ee215 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vand_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vand_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vand_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vand_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vand_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vand_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vand_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vand_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vand_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vand_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vand_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vand_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vand_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vand_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vand_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vand_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vand_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vand_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vand.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vand_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vand.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vand_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vand.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vand_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vand.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1998,7 +1998,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2013,7 +2013,7 @@ define <vscale x 1 x i8> @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2030,7 +2030,7 @@ define <vscale x 2 x i8> @intrinsic_vand_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2045,7 +2045,7 @@ define <vscale x 2 x i8> @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2062,7 +2062,7 @@ define <vscale x 4 x i8> @intrinsic_vand_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2077,7 +2077,7 @@ define <vscale x 4 x i8> @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2094,7 +2094,7 @@ define <vscale x 8 x i8> @intrinsic_vand_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2109,7 +2109,7 @@ define <vscale x 8 x i8> @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2126,7 +2126,7 @@ define <vscale x 16 x i8> @intrinsic_vand_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2141,7 +2141,7 @@ define <vscale x 16 x i8> @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2158,7 +2158,7 @@ define <vscale x 32 x i8> @intrinsic_vand_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2173,7 +2173,7 @@ define <vscale x 32 x i8> @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2190,7 +2190,7 @@ define <vscale x 64 x i8> @intrinsic_vand_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2205,7 +2205,7 @@ define <vscale x 64 x i8> @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2222,7 +2222,7 @@ define <vscale x 1 x i16> @intrinsic_vand_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2237,7 +2237,7 @@ define <vscale x 1 x i16> @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2254,7 +2254,7 @@ define <vscale x 2 x i16> @intrinsic_vand_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2269,7 +2269,7 @@ define <vscale x 2 x i16> @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2286,7 +2286,7 @@ define <vscale x 4 x i16> @intrinsic_vand_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2301,7 +2301,7 @@ define <vscale x 4 x i16> @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2318,7 +2318,7 @@ define <vscale x 8 x i16> @intrinsic_vand_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2333,7 +2333,7 @@ define <vscale x 8 x i16> @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2350,7 +2350,7 @@ define <vscale x 16 x i16> @intrinsic_vand_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2365,7 +2365,7 @@ define <vscale x 16 x i16> @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2382,7 +2382,7 @@ define <vscale x 32 x i16> @intrinsic_vand_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2397,7 +2397,7 @@ define <vscale x 32 x i16> @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2414,7 +2414,7 @@ define <vscale x 1 x i32> @intrinsic_vand_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2429,7 +2429,7 @@ define <vscale x 1 x i32> @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2446,7 +2446,7 @@ define <vscale x 2 x i32> @intrinsic_vand_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2461,7 +2461,7 @@ define <vscale x 2 x i32> @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2478,7 +2478,7 @@ define <vscale x 4 x i32> @intrinsic_vand_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2493,7 +2493,7 @@ define <vscale x 4 x i32> @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2510,7 +2510,7 @@ define <vscale x 8 x i32> @intrinsic_vand_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2525,7 +2525,7 @@ define <vscale x 8 x i32> @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2542,7 +2542,7 @@ define <vscale x 16 x i32> @intrinsic_vand_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2557,7 +2557,7 @@ define <vscale x 16 x i32> @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2574,7 +2574,7 @@ define <vscale x 1 x i64> @intrinsic_vand_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2589,7 +2589,7 @@ define <vscale x 1 x i64> @intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2606,7 +2606,7 @@ define <vscale x 2 x i64> @intrinsic_vand_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2621,7 +2621,7 @@ define <vscale x 2 x i64> @intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2638,7 +2638,7 @@ define <vscale x 4 x i64> @intrinsic_vand_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2653,7 +2653,7 @@ define <vscale x 4 x i64> @intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2670,7 +2670,7 @@ define <vscale x 8 x i64> @intrinsic_vand_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2685,7 +2685,7 @@ define <vscale x 8 x i64> @intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
index 94c63dd9c9e5..ce8677b5f237 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vand_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vand_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vand_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vand_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vand_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vand_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vand_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vand_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vand_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vand_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vand_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vand_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vand_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vand_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vand_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vand_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vand_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vand_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vand_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vand_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vand_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 1 x i8> @intrinsic_vand_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 1 x i8> @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@ define <vscale x 2 x i8> @intrinsic_vand_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 2 x i8> @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x i8> @intrinsic_vand_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 4 x i8> @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@ define <vscale x 8 x i8> @intrinsic_vand_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 8 x i8> @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@ define <vscale x 16 x i8> @intrinsic_vand_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 16 x i8> @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 32 x i8> @intrinsic_vand_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 32 x i8> @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 64 x i8> @intrinsic_vand_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 64 x i8> @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@ define <vscale x 1 x i16> @intrinsic_vand_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i16> @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@ define <vscale x 2 x i16> @intrinsic_vand_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i16> @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@ define <vscale x 4 x i16> @intrinsic_vand_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i16> @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@ define <vscale x 8 x i16> @intrinsic_vand_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i16> @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i16> @intrinsic_vand_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i16> @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 32 x i16> @intrinsic_vand_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@ define <vscale x 32 x i16> @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@ define <vscale x 1 x i32> @intrinsic_vand_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@ define <vscale x 1 x i32> @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@ define <vscale x 2 x i32> @intrinsic_vand_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@ define <vscale x 2 x i32> @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x i32> @intrinsic_vand_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@ define <vscale x 4 x i32> @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@ define <vscale x 8 x i32> @intrinsic_vand_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@ define <vscale x 8 x i32> @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@ define <vscale x 16 x i32> @intrinsic_vand_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@ define <vscale x 16 x i32> @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@ define <vscale x 1 x i64> @intrinsic_vand_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@ define <vscale x 1 x i64> @intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@ define <vscale x 2 x i64> @intrinsic_vand_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@ define <vscale x 2 x i64> @intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@ define <vscale x 4 x i64> @intrinsic_vand_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@ define <vscale x 4 x i64> @intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i64> @intrinsic_vand_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 8 x i64> @intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
index 37f46aaba1ba..04bbf5ccc6e0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vasub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vasub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vasub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vasub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vasub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vasub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vasub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vasub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vasub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vasub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vasub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vasub_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vasub_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vasub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vasub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vasub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vasub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vasub_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vasub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vasub.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vasub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vasub.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vasub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vasub.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vasub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
index b097a34b7279..0759e1558046 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vasub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vasub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vasub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vasub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vasub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vasub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vasub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vasub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vasub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vasub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vasub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vasub_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vasub_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vasub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vasub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vasub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vasub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vasub_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vasub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vasub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vasub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vasub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
index d26c5e45903f..84f69449f122 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsc
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vasubu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vasubu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vasubu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
index 0d14aec38dd1..f41d9faf06a6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsc
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
index 2f155d9a5c67..d5bfa278276d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vcompress_vm_nxv1i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 2 x i8> @intrinsic_vcompress_vm_nxv2i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -56,7 +56,7 @@ define <vscale x 4 x i8> @intrinsic_vcompress_vm_nxv4i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -78,7 +78,7 @@ define <vscale x 8 x i8> @intrinsic_vcompress_vm_nxv8i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -100,7 +100,7 @@ define <vscale x 16 x i8> @intrinsic_vcompress_vm_nxv16i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -122,7 +122,7 @@ define <vscale x 32 x i8> @intrinsic_vcompress_vm_nxv32i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -144,7 +144,7 @@ define <vscale x 64 x i8> @intrinsic_vcompress_vm_nxv64i8_nxv64i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -166,7 +166,7 @@ define <vscale x 1 x i16> @intrinsic_vcompress_vm_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -188,7 +188,7 @@ define <vscale x 2 x i16> @intrinsic_vcompress_vm_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -210,7 +210,7 @@ define <vscale x 4 x i16> @intrinsic_vcompress_vm_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -232,7 +232,7 @@ define <vscale x 8 x i16> @intrinsic_vcompress_vm_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -254,7 +254,7 @@ define <vscale x 16 x i16> @intrinsic_vcompress_vm_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -276,7 +276,7 @@ define <vscale x 32 x i16> @intrinsic_vcompress_vm_nxv32i16_nxv32i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x i32> @intrinsic_vcompress_vm_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x i32> @intrinsic_vcompress_vm_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -342,7 +342,7 @@ define <vscale x 4 x i32> @intrinsic_vcompress_vm_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -364,7 +364,7 @@ define <vscale x 8 x i32> @intrinsic_vcompress_vm_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -386,7 +386,7 @@ define <vscale x 16 x i32> @intrinsic_vcompress_vm_nxv16i32_nxv16i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -408,7 +408,7 @@ define <vscale x 1 x i64> @intrinsic_vcompress_vm_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -430,7 +430,7 @@ define <vscale x 2 x i64> @intrinsic_vcompress_vm_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -452,7 +452,7 @@ define <vscale x 4 x i64> @intrinsic_vcompress_vm_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i64> @intrinsic_vcompress_vm_nxv8i64_nxv8i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -496,7 +496,7 @@ define <vscale x 1 x half> @intrinsic_vcompress_vm_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16(
     <vscale x 1 x half> %0,
@@ -518,7 +518,7 @@ define <vscale x 2 x half> @intrinsic_vcompress_vm_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16(
     <vscale x 2 x half> %0,
@@ -540,7 +540,7 @@ define <vscale x 4 x half> @intrinsic_vcompress_vm_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16(
     <vscale x 4 x half> %0,
@@ -562,7 +562,7 @@ define <vscale x 8 x half> @intrinsic_vcompress_vm_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16(
     <vscale x 8 x half> %0,
@@ -584,7 +584,7 @@ define <vscale x 16 x half> @intrinsic_vcompress_vm_nxv16f16_nxv16f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16(
     <vscale x 16 x half> %0,
@@ -606,7 +606,7 @@ define <vscale x 32 x half> @intrinsic_vcompress_vm_nxv32f16_nxv32f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16(
     <vscale x 32 x half> %0,
@@ -628,7 +628,7 @@ define <vscale x 1 x float> @intrinsic_vcompress_vm_nxv1f32_nxv1f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32(
     <vscale x 1 x float> %0,
@@ -650,7 +650,7 @@ define <vscale x 2 x float> @intrinsic_vcompress_vm_nxv2f32_nxv2f32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32(
     <vscale x 2 x float> %0,
@@ -672,7 +672,7 @@ define <vscale x 4 x float> @intrinsic_vcompress_vm_nxv4f32_nxv4f32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32(
     <vscale x 4 x float> %0,
@@ -694,7 +694,7 @@ define <vscale x 8 x float> @intrinsic_vcompress_vm_nxv8f32_nxv8f32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32(
     <vscale x 8 x float> %0,
@@ -716,7 +716,7 @@ define <vscale x 16 x float> @intrinsic_vcompress_vm_nxv16f32_nxv16f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32(
     <vscale x 16 x float> %0,
@@ -738,7 +738,7 @@ define <vscale x 1 x double> @intrinsic_vcompress_vm_nxv1f64_nxv1f64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64(
     <vscale x 1 x double> %0,
@@ -760,7 +760,7 @@ define <vscale x 2 x double> @intrinsic_vcompress_vm_nxv2f64_nxv2f64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64(
     <vscale x 2 x double> %0,
@@ -782,7 +782,7 @@ define <vscale x 4 x double> @intrinsic_vcompress_vm_nxv4f64_nxv4f64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64(
     <vscale x 4 x double> %0,
@@ -804,7 +804,7 @@ define <vscale x 8 x double> @intrinsic_vcompress_vm_nxv8f64_nxv8f64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64(
     <vscale x 8 x double> %0,
@@ -822,7 +822,7 @@ define <vscale x 1 x i8> @intrinsic_vcompress_um_nxv1i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vcompress.vm v25, v8, v0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
     <vscale x 1 x i8> undef,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll
index 1dcdcab6d023..ce67123bf9ba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vcompress_vm_nxv1i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 2 x i8> @intrinsic_vcompress_vm_nxv2i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -56,7 +56,7 @@ define <vscale x 4 x i8> @intrinsic_vcompress_vm_nxv4i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -78,7 +78,7 @@ define <vscale x 8 x i8> @intrinsic_vcompress_vm_nxv8i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -100,7 +100,7 @@ define <vscale x 16 x i8> @intrinsic_vcompress_vm_nxv16i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -122,7 +122,7 @@ define <vscale x 32 x i8> @intrinsic_vcompress_vm_nxv32i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -144,7 +144,7 @@ define <vscale x 64 x i8> @intrinsic_vcompress_vm_nxv64i8_nxv64i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -166,7 +166,7 @@ define <vscale x 1 x i16> @intrinsic_vcompress_vm_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -188,7 +188,7 @@ define <vscale x 2 x i16> @intrinsic_vcompress_vm_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -210,7 +210,7 @@ define <vscale x 4 x i16> @intrinsic_vcompress_vm_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -232,7 +232,7 @@ define <vscale x 8 x i16> @intrinsic_vcompress_vm_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -254,7 +254,7 @@ define <vscale x 16 x i16> @intrinsic_vcompress_vm_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -276,7 +276,7 @@ define <vscale x 32 x i16> @intrinsic_vcompress_vm_nxv32i16_nxv32i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x i32> @intrinsic_vcompress_vm_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x i32> @intrinsic_vcompress_vm_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -342,7 +342,7 @@ define <vscale x 4 x i32> @intrinsic_vcompress_vm_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -364,7 +364,7 @@ define <vscale x 8 x i32> @intrinsic_vcompress_vm_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -386,7 +386,7 @@ define <vscale x 16 x i32> @intrinsic_vcompress_vm_nxv16i32_nxv16i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -408,7 +408,7 @@ define <vscale x 1 x i64> @intrinsic_vcompress_vm_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -430,7 +430,7 @@ define <vscale x 2 x i64> @intrinsic_vcompress_vm_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -452,7 +452,7 @@ define <vscale x 4 x i64> @intrinsic_vcompress_vm_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i64> @intrinsic_vcompress_vm_nxv8i64_nxv8i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -496,7 +496,7 @@ define <vscale x 1 x half> @intrinsic_vcompress_vm_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16(
     <vscale x 1 x half> %0,
@@ -518,7 +518,7 @@ define <vscale x 2 x half> @intrinsic_vcompress_vm_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16(
     <vscale x 2 x half> %0,
@@ -540,7 +540,7 @@ define <vscale x 4 x half> @intrinsic_vcompress_vm_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16(
     <vscale x 4 x half> %0,
@@ -562,7 +562,7 @@ define <vscale x 8 x half> @intrinsic_vcompress_vm_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16(
     <vscale x 8 x half> %0,
@@ -584,7 +584,7 @@ define <vscale x 16 x half> @intrinsic_vcompress_vm_nxv16f16_nxv16f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16(
     <vscale x 16 x half> %0,
@@ -606,7 +606,7 @@ define <vscale x 32 x half> @intrinsic_vcompress_vm_nxv32f16_nxv32f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16(
     <vscale x 32 x half> %0,
@@ -628,7 +628,7 @@ define <vscale x 1 x float> @intrinsic_vcompress_vm_nxv1f32_nxv1f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32(
     <vscale x 1 x float> %0,
@@ -650,7 +650,7 @@ define <vscale x 2 x float> @intrinsic_vcompress_vm_nxv2f32_nxv2f32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32(
     <vscale x 2 x float> %0,
@@ -672,7 +672,7 @@ define <vscale x 4 x float> @intrinsic_vcompress_vm_nxv4f32_nxv4f32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32(
     <vscale x 4 x float> %0,
@@ -694,7 +694,7 @@ define <vscale x 8 x float> @intrinsic_vcompress_vm_nxv8f32_nxv8f32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32(
     <vscale x 8 x float> %0,
@@ -716,7 +716,7 @@ define <vscale x 16 x float> @intrinsic_vcompress_vm_nxv16f32_nxv16f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32(
     <vscale x 16 x float> %0,
@@ -738,7 +738,7 @@ define <vscale x 1 x double> @intrinsic_vcompress_vm_nxv1f64_nxv1f64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64(
     <vscale x 1 x double> %0,
@@ -760,7 +760,7 @@ define <vscale x 2 x double> @intrinsic_vcompress_vm_nxv2f64_nxv2f64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64(
     <vscale x 2 x double> %0,
@@ -782,7 +782,7 @@ define <vscale x 4 x double> @intrinsic_vcompress_vm_nxv4f64_nxv4f64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64(
     <vscale x 4 x double> %0,
@@ -804,7 +804,7 @@ define <vscale x 8 x double> @intrinsic_vcompress_vm_nxv8f64_nxv8f64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64(
     <vscale x 8 x double> %0,
@@ -822,7 +822,7 @@ define <vscale x 1 x i8> @intrinsic_vcompress_um_nxv1i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vcompress.vm v25, v8, v0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
     <vscale x 1 x i8> undef,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
index 11729fd4d624..eab4d1520225 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vdiv.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vdiv.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vdiv.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
index 99ce81cafa55..97478e4080d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
index a4725adde048..8174e21a77a1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vdivu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vdivu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vdivu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
index f2b8b8567197..7cd6d0370e0a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
index 9366b83eb5e4..1aad81fe064d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@ define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@ define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@ define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@ define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@ define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@ define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@ define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@ define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@ define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<v
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@ define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@ define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@ define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@ define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@ define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@ define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@ define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@ define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@ define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@ define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@ define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@ define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@ define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@ define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@ define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@ define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 1 x double> @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@ define <vscale x 1 x double> @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x double> @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@ define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@ define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@ define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@ define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@ define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
index b20c3bed3c9a..7052ae20ea45 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \
 ; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -56,7 +56,7 @@ define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -78,7 +78,7 @@ define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -100,7 +100,7 @@ define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -122,7 +122,7 @@ define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -144,7 +144,7 @@ define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -166,7 +166,7 @@ define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -210,7 +210,7 @@ define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -232,7 +232,7 @@ define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -255,7 +255,7 @@ define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -277,7 +277,7 @@ define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -299,7 +299,7 @@ define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -321,7 +321,7 @@ define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -343,7 +343,7 @@ define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -365,7 +365,7 @@ define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -387,7 +387,7 @@ define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -409,7 +409,7 @@ define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -431,7 +431,7 @@ define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@ define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -476,7 +476,7 @@ define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -498,7 +498,7 @@ define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -520,7 +520,7 @@ define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -542,7 +542,7 @@ define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -586,7 +586,7 @@ define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -608,7 +608,7 @@ define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -630,7 +630,7 @@ define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -653,7 +653,7 @@ define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<v
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -676,7 +676,7 @@ define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -699,7 +699,7 @@ define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -722,7 +722,7 @@ define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -745,7 +745,7 @@ define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -768,7 +768,7 @@ define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -791,7 +791,7 @@ define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -814,7 +814,7 @@ define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -837,7 +837,7 @@ define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -860,7 +860,7 @@ define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -883,7 +883,7 @@ define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -906,7 +906,7 @@ define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -929,7 +929,7 @@ define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -952,7 +952,7 @@ define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -975,7 +975,7 @@ define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -998,7 +998,7 @@ define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1021,7 +1021,7 @@ define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1044,7 +1044,7 @@ define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1067,7 +1067,7 @@ define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1090,7 +1090,7 @@ define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1136,7 +1136,7 @@ define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x double> @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64(<vscale x 1
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1205,7 +1205,7 @@ define <vscale x 1 x double> @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64(<vscal
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1228,7 +1228,7 @@ define <vscale x 2 x double> @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64(<vscale x 2
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1251,7 +1251,7 @@ define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscal
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1274,7 +1274,7 @@ define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1297,7 +1297,7 @@ define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscal
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@ define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1343,7 +1343,7 @@ define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscal
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll
index 62ce43bf2b74..028fe20920f3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
   <vscale x 1 x half>,
   i32);
@@ -10,7 +10,7 @@ define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x half> %0,
   i32 %1) nounwind {
 entry:
@@ -32,7 +32,7 @@ define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x i16> %0,
   <vscale x 1 x half> %1,
   <vscale x 1 x i1> %2,
@@ -56,7 +56,7 @@ define <vscale x 2 x i16> @intrinsic_vfclass_v_nxv2i16_nxv2f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x half> %0,
   i32 %1) nounwind {
 entry:
@@ -78,7 +78,7 @@ define <vscale x 2 x i16> @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x i16> %0,
   <vscale x 2 x half> %1,
   <vscale x 2 x i1> %2,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vfclass_v_nxv4i16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x half> %0,
   i32 %1) nounwind {
 entry:
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x i16> %0,
   <vscale x 4 x half> %1,
   <vscale x 4 x i1> %2,
@@ -148,7 +148,7 @@ define <vscale x 8 x i16> @intrinsic_vfclass_v_nxv8i16_nxv8f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x half> %0,
   i32 %1) nounwind {
 entry:
@@ -170,7 +170,7 @@ define <vscale x 8 x i16> @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x i16> %0,
   <vscale x 8 x half> %1,
   <vscale x 8 x i1> %2,
@@ -194,7 +194,7 @@ define <vscale x 16 x i16> @intrinsic_vfclass_v_nxv16i16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x half> %0,
   i32 %1) nounwind {
 entry:
@@ -216,7 +216,7 @@ define <vscale x 16 x i16> @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x i16> %0,
   <vscale x 16 x half> %1,
   <vscale x 16 x i1> %2,
@@ -240,7 +240,7 @@ define <vscale x 32 x i16> @intrinsic_vfclass_v_nxv32i16_nxv32f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 32 x half> %0,
   i32 %1) nounwind {
 entry:
@@ -262,7 +262,7 @@ define <vscale x 32 x i16> @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 32 x i16> %0,
   <vscale x 32 x half> %1,
   <vscale x 32 x i1> %2,
@@ -286,7 +286,7 @@ define <vscale x 1 x i32> @intrinsic_vfclass_v_nxv1i32_nxv1f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x float> %0,
   i32 %1) nounwind {
 entry:
@@ -308,7 +308,7 @@ define <vscale x 1 x i32> @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x i32> %0,
   <vscale x 1 x float> %1,
   <vscale x 1 x i1> %2,
@@ -332,7 +332,7 @@ define <vscale x 2 x i32> @intrinsic_vfclass_v_nxv2i32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x float> %0,
   i32 %1) nounwind {
 entry:
@@ -354,7 +354,7 @@ define <vscale x 2 x i32> @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x i32> %0,
   <vscale x 2 x float> %1,
   <vscale x 2 x i1> %2,
@@ -378,7 +378,7 @@ define <vscale x 4 x i32> @intrinsic_vfclass_v_nxv4i32_nxv4f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x float> %0,
   i32 %1) nounwind {
 entry:
@@ -400,7 +400,7 @@ define <vscale x 4 x i32> @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x i32> %0,
   <vscale x 4 x float> %1,
   <vscale x 4 x i1> %2,
@@ -424,7 +424,7 @@ define <vscale x 8 x i32> @intrinsic_vfclass_v_nxv8i32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x float> %0,
   i32 %1) nounwind {
 entry:
@@ -446,7 +446,7 @@ define <vscale x 8 x i32> @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x i32> %0,
   <vscale x 8 x float> %1,
   <vscale x 8 x i1> %2,
@@ -470,7 +470,7 @@ define <vscale x 16 x i32> @intrinsic_vfclass_v_nxv16i32_nxv16f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x float> %0,
   i32 %1) nounwind {
 entry:
@@ -492,7 +492,7 @@ define <vscale x 16 x i32> @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x i32> %0,
   <vscale x 16 x float> %1,
   <vscale x 16 x i1> %2,
@@ -516,7 +516,7 @@ define <vscale x 1 x i64> @intrinsic_vfclass_v_nxv1i64_nxv1f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x double> %0,
   i32 %1) nounwind {
 entry:
@@ -538,7 +538,7 @@ define <vscale x 1 x i64> @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x i64> %0,
   <vscale x 1 x double> %1,
   <vscale x 1 x i1> %2,
@@ -562,7 +562,7 @@ define <vscale x 2 x i64> @intrinsic_vfclass_v_nxv2i64_nxv2f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x double> %0,
   i32 %1) nounwind {
 entry:
@@ -584,7 +584,7 @@ define <vscale x 2 x i64> @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x i64> %0,
   <vscale x 2 x double> %1,
   <vscale x 2 x i1> %2,
@@ -608,7 +608,7 @@ define <vscale x 4 x i64> @intrinsic_vfclass_v_nxv4i64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x double> %0,
   i32 %1) nounwind {
 entry:
@@ -630,7 +630,7 @@ define <vscale x 4 x i64> @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x i64> %0,
   <vscale x 4 x double> %1,
   <vscale x 4 x i1> %2,
@@ -654,7 +654,7 @@ define <vscale x 8 x i64> @intrinsic_vfclass_v_nxv8i64_nxv8f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x double> %0,
   i32 %1) nounwind {
 entry:
@@ -676,7 +676,7 @@ define <vscale x 8 x i64> @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x i64> %0,
   <vscale x 8 x double> %1,
   <vscale x 8 x i1> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll
index 00977cb42dd9..2b7f671059e9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
   <vscale x 1 x half>,
   i64);
@@ -10,7 +10,7 @@ define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -32,7 +32,7 @@ define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x i16> %0,
   <vscale x 1 x half> %1,
   <vscale x 1 x i1> %2,
@@ -56,7 +56,7 @@ define <vscale x 2 x i16> @intrinsic_vfclass_v_nxv2i16_nxv2f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -78,7 +78,7 @@ define <vscale x 2 x i16> @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x i16> %0,
   <vscale x 2 x half> %1,
   <vscale x 2 x i1> %2,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vfclass_v_nxv4i16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x i16> %0,
   <vscale x 4 x half> %1,
   <vscale x 4 x i1> %2,
@@ -148,7 +148,7 @@ define <vscale x 8 x i16> @intrinsic_vfclass_v_nxv8i16_nxv8f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -170,7 +170,7 @@ define <vscale x 8 x i16> @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x i16> %0,
   <vscale x 8 x half> %1,
   <vscale x 8 x i1> %2,
@@ -194,7 +194,7 @@ define <vscale x 16 x i16> @intrinsic_vfclass_v_nxv16i16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -216,7 +216,7 @@ define <vscale x 16 x i16> @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x i16> %0,
   <vscale x 16 x half> %1,
   <vscale x 16 x i1> %2,
@@ -240,7 +240,7 @@ define <vscale x 32 x i16> @intrinsic_vfclass_v_nxv32i16_nxv32f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 32 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -262,7 +262,7 @@ define <vscale x 32 x i16> @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 32 x i16> %0,
   <vscale x 32 x half> %1,
   <vscale x 32 x i1> %2,
@@ -286,7 +286,7 @@ define <vscale x 1 x i32> @intrinsic_vfclass_v_nxv1i32_nxv1f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -308,7 +308,7 @@ define <vscale x 1 x i32> @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x i32> %0,
   <vscale x 1 x float> %1,
   <vscale x 1 x i1> %2,
@@ -332,7 +332,7 @@ define <vscale x 2 x i32> @intrinsic_vfclass_v_nxv2i32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -354,7 +354,7 @@ define <vscale x 2 x i32> @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x i32> %0,
   <vscale x 2 x float> %1,
   <vscale x 2 x i1> %2,
@@ -378,7 +378,7 @@ define <vscale x 4 x i32> @intrinsic_vfclass_v_nxv4i32_nxv4f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -400,7 +400,7 @@ define <vscale x 4 x i32> @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x i32> %0,
   <vscale x 4 x float> %1,
   <vscale x 4 x i1> %2,
@@ -424,7 +424,7 @@ define <vscale x 8 x i32> @intrinsic_vfclass_v_nxv8i32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -446,7 +446,7 @@ define <vscale x 8 x i32> @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x i32> %0,
   <vscale x 8 x float> %1,
   <vscale x 8 x i1> %2,
@@ -470,7 +470,7 @@ define <vscale x 16 x i32> @intrinsic_vfclass_v_nxv16i32_nxv16f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -492,7 +492,7 @@ define <vscale x 16 x i32> @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x i32> %0,
   <vscale x 16 x float> %1,
   <vscale x 16 x i1> %2,
@@ -516,7 +516,7 @@ define <vscale x 1 x i64> @intrinsic_vfclass_v_nxv1i64_nxv1f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x double> %0,
   i64 %1) nounwind {
 entry:
@@ -538,7 +538,7 @@ define <vscale x 1 x i64> @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x i64> %0,
   <vscale x 1 x double> %1,
   <vscale x 1 x i1> %2,
@@ -562,7 +562,7 @@ define <vscale x 2 x i64> @intrinsic_vfclass_v_nxv2i64_nxv2f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x double> %0,
   i64 %1) nounwind {
 entry:
@@ -584,7 +584,7 @@ define <vscale x 2 x i64> @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x i64> %0,
   <vscale x 2 x double> %1,
   <vscale x 2 x i1> %2,
@@ -608,7 +608,7 @@ define <vscale x 4 x i64> @intrinsic_vfclass_v_nxv4i64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x double> %0,
   i64 %1) nounwind {
 entry:
@@ -630,7 +630,7 @@ define <vscale x 4 x i64> @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x i64> %0,
   <vscale x 4 x double> %1,
   <vscale x 4 x i1> %2,
@@ -654,7 +654,7 @@ define <vscale x 8 x i64> @intrinsic_vfclass_v_nxv8i64_nxv8f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x double> %0,
   i64 %1) nounwind {
 entry:
@@ -676,7 +676,7 @@ define <vscale x 8 x i64> @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x i64> %0,
   <vscale x 8 x double> %1,
   <vscale x 8 x i1> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll
index af453fa1cf7b..ecf15bfea677 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
   <vscale x 1 x i16>,
   i32);
@@ -10,7 +10,7 @@ define <vscale x 1 x half> @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -50,7 +50,7 @@ define <vscale x 2 x half> @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x half> @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -90,7 +90,7 @@ define <vscale x 4 x half> @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x half> @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -130,7 +130,7 @@ define <vscale x 8 x half> @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half> @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -170,7 +170,7 @@ define <vscale x 16 x half> @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x half> @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -210,7 +210,7 @@ define <vscale x 32 x half> @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x half> @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -250,7 +250,7 @@ define <vscale x 1 x float> @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -270,7 +270,7 @@ define <vscale x 1 x float> @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -290,7 +290,7 @@ define <vscale x 2 x float> @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -310,7 +310,7 @@ define <vscale x 2 x float> @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -330,7 +330,7 @@ define <vscale x 4 x float> @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x float> @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -370,7 +370,7 @@ define <vscale x 8 x float> @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -390,7 +390,7 @@ define <vscale x 8 x float> @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -410,7 +410,7 @@ define <vscale x 16 x float> @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -430,7 +430,7 @@ define <vscale x 16 x float> @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -450,7 +450,7 @@ define <vscale x 1 x double> @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -470,7 +470,7 @@ define <vscale x 1 x double> @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -490,7 +490,7 @@ define <vscale x 2 x double> @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -510,7 +510,7 @@ define <vscale x 2 x double> @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -530,7 +530,7 @@ define <vscale x 4 x double> @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x double> @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -570,7 +570,7 @@ define <vscale x 8 x double> @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -590,7 +590,7 @@ define <vscale x 8 x double> @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll
index a7c94da2ba8f..dec3d3c9f6fa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
   <vscale x 1 x i16>,
   i64);
@@ -10,7 +10,7 @@ define <vscale x 1 x half> @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -50,7 +50,7 @@ define <vscale x 2 x half> @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x half> @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -90,7 +90,7 @@ define <vscale x 4 x half> @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x half> @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -130,7 +130,7 @@ define <vscale x 8 x half> @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half> @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -170,7 +170,7 @@ define <vscale x 16 x half> @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x half> @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -210,7 +210,7 @@ define <vscale x 32 x half> @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x half> @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -250,7 +250,7 @@ define <vscale x 1 x float> @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -270,7 +270,7 @@ define <vscale x 1 x float> @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -290,7 +290,7 @@ define <vscale x 2 x float> @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -310,7 +310,7 @@ define <vscale x 2 x float> @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -330,7 +330,7 @@ define <vscale x 4 x float> @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x float> @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -370,7 +370,7 @@ define <vscale x 8 x float> @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -390,7 +390,7 @@ define <vscale x 8 x float> @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -410,7 +410,7 @@ define <vscale x 16 x float> @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -430,7 +430,7 @@ define <vscale x 16 x float> @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -450,7 +450,7 @@ define <vscale x 1 x double> @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -470,7 +470,7 @@ define <vscale x 1 x double> @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -490,7 +490,7 @@ define <vscale x 2 x double> @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -510,7 +510,7 @@ define <vscale x 2 x double> @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -530,7 +530,7 @@ define <vscale x 4 x double> @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x double> @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -570,7 +570,7 @@ define <vscale x 8 x double> @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -590,7 +590,7 @@ define <vscale x 8 x double> @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll
index f7bd9661b666..2d2352638ab6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
   <vscale x 1 x i16>,
   i32);
@@ -10,7 +10,7 @@ define <vscale x 1 x half> @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -50,7 +50,7 @@ define <vscale x 2 x half> @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -90,7 +90,7 @@ define <vscale x 4 x half> @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -130,7 +130,7 @@ define <vscale x 8 x half> @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -170,7 +170,7 @@ define <vscale x 16 x half> @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -210,7 +210,7 @@ define <vscale x 32 x half> @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -250,7 +250,7 @@ define <vscale x 1 x float> @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -270,7 +270,7 @@ define <vscale x 1 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -290,7 +290,7 @@ define <vscale x 2 x float> @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -310,7 +310,7 @@ define <vscale x 2 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -330,7 +330,7 @@ define <vscale x 4 x float> @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -370,7 +370,7 @@ define <vscale x 8 x float> @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -390,7 +390,7 @@ define <vscale x 8 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -410,7 +410,7 @@ define <vscale x 16 x float> @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -430,7 +430,7 @@ define <vscale x 16 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -450,7 +450,7 @@ define <vscale x 1 x double> @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -470,7 +470,7 @@ define <vscale x 1 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -490,7 +490,7 @@ define <vscale x 2 x double> @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -510,7 +510,7 @@ define <vscale x 2 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -530,7 +530,7 @@ define <vscale x 4 x double> @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -570,7 +570,7 @@ define <vscale x 8 x double> @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -590,7 +590,7 @@ define <vscale x 8 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll
index 7d1533394542..bdcf80cc54da 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
   <vscale x 1 x i16>,
   i64);
@@ -10,7 +10,7 @@ define <vscale x 1 x half> @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -50,7 +50,7 @@ define <vscale x 2 x half> @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -90,7 +90,7 @@ define <vscale x 4 x half> @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -130,7 +130,7 @@ define <vscale x 8 x half> @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -170,7 +170,7 @@ define <vscale x 16 x half> @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -210,7 +210,7 @@ define <vscale x 32 x half> @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x half> @intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -250,7 +250,7 @@ define <vscale x 1 x float> @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -270,7 +270,7 @@ define <vscale x 1 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -290,7 +290,7 @@ define <vscale x 2 x float> @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -310,7 +310,7 @@ define <vscale x 2 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -330,7 +330,7 @@ define <vscale x 4 x float> @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -370,7 +370,7 @@ define <vscale x 8 x float> @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -390,7 +390,7 @@ define <vscale x 8 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -410,7 +410,7 @@ define <vscale x 16 x float> @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -430,7 +430,7 @@ define <vscale x 16 x float> @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -450,7 +450,7 @@ define <vscale x 1 x double> @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -470,7 +470,7 @@ define <vscale x 1 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -490,7 +490,7 @@ define <vscale x 2 x double> @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -510,7 +510,7 @@ define <vscale x 2 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -530,7 +530,7 @@ define <vscale x 4 x double> @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -570,7 +570,7 @@ define <vscale x 8 x double> @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -590,7 +590,7 @@ define <vscale x 8 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll
index b7f66f0d24ab..dda4f9f15584 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -10,7 +10,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -50,7 +50,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16(
     <vscale x 2 x i16> %0,
@@ -90,7 +90,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16(
     <vscale x 4 x i16> %0,
@@ -130,7 +130,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16(
     <vscale x 8 x i16> %0,
@@ -170,7 +170,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16(
     <vscale x 16 x i16> %0,
@@ -210,7 +210,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16(
     <vscale x 32 x i16> %0,
@@ -250,7 +250,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32(
     <vscale x 1 x i32> %0,
@@ -290,7 +290,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32(
     <vscale x 2 x i32> %0,
@@ -330,7 +330,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32(
     <vscale x 4 x i32> %0,
@@ -370,7 +370,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32(
     <vscale x 8 x i32> %0,
@@ -410,7 +410,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32(
     <vscale x 16 x i32> %0,
@@ -450,7 +450,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64(
     <vscale x 1 x i64> %0,
@@ -490,7 +490,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64(
     <vscale x 2 x i64> %0,
@@ -530,7 +530,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64(
     <vscale x 4 x i64> %0,
@@ -570,7 +570,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll
index 89ef4c7e3483..98bc96fe5fa5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -10,7 +10,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -50,7 +50,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16(
     <vscale x 2 x i16> %0,
@@ -90,7 +90,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16(
     <vscale x 4 x i16> %0,
@@ -130,7 +130,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16(
     <vscale x 8 x i16> %0,
@@ -170,7 +170,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16(
     <vscale x 16 x i16> %0,
@@ -210,7 +210,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16(
     <vscale x 32 x i16> %0,
@@ -250,7 +250,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32(
     <vscale x 1 x i32> %0,
@@ -290,7 +290,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32(
     <vscale x 2 x i32> %0,
@@ -330,7 +330,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32(
     <vscale x 4 x i32> %0,
@@ -370,7 +370,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32(
     <vscale x 8 x i32> %0,
@@ -410,7 +410,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32(
     <vscale x 16 x i32> %0,
@@ -450,7 +450,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64(
     <vscale x 1 x i64> %0,
@@ -490,7 +490,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64(
     <vscale x 2 x i64> %0,
@@ -530,7 +530,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64(
     <vscale x 4 x i64> %0,
@@ -570,7 +570,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll
index 6200daf13cac..0a0f817ebe3f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -10,7 +10,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -50,7 +50,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16(
     <vscale x 2 x i16> %0,
@@ -90,7 +90,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16(
     <vscale x 4 x i16> %0,
@@ -130,7 +130,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16(
     <vscale x 8 x i16> %0,
@@ -170,7 +170,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16(
     <vscale x 16 x i16> %0,
@@ -210,7 +210,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16(
     <vscale x 32 x i16> %0,
@@ -250,7 +250,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32(
     <vscale x 1 x i32> %0,
@@ -290,7 +290,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32(
     <vscale x 2 x i32> %0,
@@ -330,7 +330,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32(
     <vscale x 4 x i32> %0,
@@ -370,7 +370,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32(
     <vscale x 8 x i32> %0,
@@ -410,7 +410,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32(
     <vscale x 16 x i32> %0,
@@ -450,7 +450,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64(
     <vscale x 1 x i64> %0,
@@ -490,7 +490,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64(
     <vscale x 2 x i64> %0,
@@ -530,7 +530,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64(
     <vscale x 4 x i64> %0,
@@ -570,7 +570,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll
index 6ac117be0129..e80ba354d3bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -10,7 +10,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -50,7 +50,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16(
     <vscale x 2 x i16> %0,
@@ -90,7 +90,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16(
     <vscale x 4 x i16> %0,
@@ -130,7 +130,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16(
     <vscale x 8 x i16> %0,
@@ -170,7 +170,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16(
     <vscale x 16 x i16> %0,
@@ -210,7 +210,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16(
     <vscale x 32 x i16> %0,
@@ -250,7 +250,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32(
     <vscale x 1 x i32> %0,
@@ -290,7 +290,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32(
     <vscale x 2 x i32> %0,
@@ -330,7 +330,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32(
     <vscale x 4 x i32> %0,
@@ -370,7 +370,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32(
     <vscale x 8 x i32> %0,
@@ -410,7 +410,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32(
     <vscale x 16 x i32> %0,
@@ -450,7 +450,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64(
     <vscale x 1 x i64> %0,
@@ -490,7 +490,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64(
     <vscale x 2 x i64> %0,
@@ -530,7 +530,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64(
     <vscale x 4 x i64> %0,
@@ -570,7 +570,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll
index 63eb153ca62e..40ca84ca7c0e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -10,7 +10,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -50,7 +50,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16(<vscale x 2 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16(
     <vscale x 2 x i16> %0,
@@ -90,7 +90,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16(<vscale x 4 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16(
     <vscale x 4 x i16> %0,
@@ -130,7 +130,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16(<vscale x 8 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16(
     <vscale x 8 x i16> %0,
@@ -170,7 +170,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16(
     <vscale x 16 x i16> %0,
@@ -210,7 +210,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16(
     <vscale x 32 x i16> %0,
@@ -250,7 +250,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32(<vscale x 1 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32(
     <vscale x 1 x i32> %0,
@@ -290,7 +290,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32(<vscale x 2 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32(
     <vscale x 2 x i32> %0,
@@ -330,7 +330,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32(<vscale x 4 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32(
     <vscale x 4 x i32> %0,
@@ -370,7 +370,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32(<vscale x 8 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32(
     <vscale x 8 x i32> %0,
@@ -410,7 +410,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32(
     <vscale x 16 x i32> %0,
@@ -450,7 +450,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64(<vscale x 1 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64(
     <vscale x 1 x i64> %0,
@@ -490,7 +490,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64(<vscale x 2 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64(
     <vscale x 2 x i64> %0,
@@ -530,7 +530,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64(<vscale x 4 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64(
     <vscale x 4 x i64> %0,
@@ -570,7 +570,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64(<vscale x 8 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll
index 09fe01738895..b0421e471349 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -10,7 +10,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -50,7 +50,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16(<vscale x 2 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16(
     <vscale x 2 x i16> %0,
@@ -90,7 +90,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16(<vscale x 4 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16(
     <vscale x 4 x i16> %0,
@@ -130,7 +130,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16(<vscale x 8 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16(
     <vscale x 8 x i16> %0,
@@ -170,7 +170,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16(
     <vscale x 16 x i16> %0,
@@ -210,7 +210,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16(
     <vscale x 32 x i16> %0,
@@ -250,7 +250,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32(<vscale x 1 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32(
     <vscale x 1 x i32> %0,
@@ -290,7 +290,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32(<vscale x 2 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32(
     <vscale x 2 x i32> %0,
@@ -330,7 +330,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32(<vscale x 4 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32(
     <vscale x 4 x i32> %0,
@@ -370,7 +370,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32(<vscale x 8 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32(
     <vscale x 8 x i32> %0,
@@ -410,7 +410,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32(
     <vscale x 16 x i32> %0,
@@ -450,7 +450,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64(<vscale x 1 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64(
     <vscale x 1 x i64> %0,
@@ -490,7 +490,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64(<vscale x 2 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64(
     <vscale x 2 x i64> %0,
@@ -530,7 +530,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64(<vscale x 4 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64(
     <vscale x 4 x i64> %0,
@@ -570,7 +570,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64(<vscale x 8 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll
index 175289c08013..a857d906ad87 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -10,7 +10,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -50,7 +50,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16(
     <vscale x 2 x i16> %0,
@@ -90,7 +90,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16(
     <vscale x 4 x i16> %0,
@@ -130,7 +130,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16(
     <vscale x 8 x i16> %0,
@@ -170,7 +170,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16(
     <vscale x 16 x i16> %0,
@@ -210,7 +210,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16(
     <vscale x 32 x i16> %0,
@@ -250,7 +250,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32(
     <vscale x 1 x i32> %0,
@@ -290,7 +290,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32(
     <vscale x 2 x i32> %0,
@@ -330,7 +330,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32(
     <vscale x 4 x i32> %0,
@@ -370,7 +370,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32(
     <vscale x 8 x i32> %0,
@@ -410,7 +410,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32(
     <vscale x 16 x i32> %0,
@@ -450,7 +450,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64(
     <vscale x 1 x i64> %0,
@@ -490,7 +490,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64(
     <vscale x 2 x i64> %0,
@@ -530,7 +530,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64(
     <vscale x 4 x i64> %0,
@@ -570,7 +570,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll
index 6b0e1a635f56..a061c7d782c8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -10,7 +10,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -50,7 +50,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16(
     <vscale x 2 x i16> %0,
@@ -90,7 +90,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16(
     <vscale x 4 x i16> %0,
@@ -130,7 +130,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16(
     <vscale x 8 x i16> %0,
@@ -170,7 +170,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16(
     <vscale x 16 x i16> %0,
@@ -210,7 +210,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16(
     <vscale x 32 x i16> %0,
@@ -250,7 +250,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32(
     <vscale x 1 x i32> %0,
@@ -290,7 +290,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32(
     <vscale x 2 x i32> %0,
@@ -330,7 +330,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32(
     <vscale x 4 x i32> %0,
@@ -370,7 +370,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32(
     <vscale x 8 x i32> %0,
@@ -410,7 +410,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32(
     <vscale x 16 x i32> %0,
@@ -450,7 +450,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64(
     <vscale x 1 x i64> %0,
@@ -490,7 +490,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64(
     <vscale x 2 x i64> %0,
@@ -530,7 +530,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64(
     <vscale x 4 x i64> %0,
@@ -570,7 +570,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
index 884e8a7adebc..c2c935553d34 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x half> @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x half> @intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x half> @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x half> @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x half> @intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x half> @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@ define <vscale x 32 x half> @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x float> @intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x float> @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x float> @intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@ define <vscale x 4 x float> @intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@ define <vscale x 4 x float> @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x float> @intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x float> @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@ define <vscale x 16 x float> @intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 16 x float> @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@ define <vscale x 1 x double> @intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x double> @intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@ define <vscale x 2 x double> @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 4 x double> @intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@ define <vscale x 4 x double> @intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@ define <vscale x 8 x double> @intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@ define <vscale x 8 x double> @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64(<v
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@ define <vscale x 1 x half> @intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@ define <vscale x 1 x half> @intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@ define <vscale x 2 x half> @intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@ define <vscale x 2 x half> @intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@ define <vscale x 4 x half> @intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@ define <vscale x 4 x half> @intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@ define <vscale x 8 x half> @intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@ define <vscale x 8 x half> @intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@ define <vscale x 16 x half> @intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@ define <vscale x 16 x half> @intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@ define <vscale x 32 x half> @intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@ define <vscale x 32 x half> @intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@ define <vscale x 1 x float> @intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32(<vscale x 1
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x float> @intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x float> @intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32(<vscale x 2
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@ define <vscale x 2 x float> @intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 4 x float> @intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32(<vscale x 4
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x float> @intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@ define <vscale x 8 x float> @intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32(<vscale x 8
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@ define <vscale x 8 x float> @intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x float> @intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 16 x float> @intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 1 x double> @intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@ define <vscale x 1 x double> @intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x double> @intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@ define <vscale x 2 x double> @intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@ define <vscale x 4 x double> @intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@ define <vscale x 4 x double> @intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@ define <vscale x 8 x double> @intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@ define <vscale x 8 x double> @intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
index 9fe5ebb07b8b..cb2a32cdf330 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfdiv_vv_nxv1f16_nxv1f16(<vscale x 1 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @intrinsic_vfdiv_vv_nxv2f16_nxv2f16(<vscale x 2 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x half> @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x half> @intrinsic_vfdiv_vv_nxv4f16_nxv4f16(<vscale x 4 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x half> @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfdiv_vv_nxv8f16_nxv8f16(<vscale x 8 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x half> @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x half> @intrinsic_vfdiv_vv_nxv16f16_nxv16f16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x half> @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfdiv_vv_nxv32f16_nxv32f16(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@ define <vscale x 32 x half> @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16(<vscale x
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x float> @intrinsic_vfdiv_vv_nxv1f32_nxv1f32(<vscale x 1 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x float> @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x float> @intrinsic_vfdiv_vv_nxv2f32_nxv2f32(<vscale x 2 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@ define <vscale x 4 x float> @intrinsic_vfdiv_vv_nxv4f32_nxv4f32(<vscale x 4 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@ define <vscale x 4 x float> @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x float> @intrinsic_vfdiv_vv_nxv8f32_nxv8f32(<vscale x 8 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x float> @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@ define <vscale x 16 x float> @intrinsic_vfdiv_vv_nxv16f32_nxv16f32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 16 x float> @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32(<vscale
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@ define <vscale x 1 x double> @intrinsic_vfdiv_vv_nxv1f64_nxv1f64(<vscale x 1 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x double> @intrinsic_vfdiv_vv_nxv2f64_nxv2f64(<vscale x 2 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@ define <vscale x 2 x double> @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 4 x double> @intrinsic_vfdiv_vv_nxv4f64_nxv4f64(<vscale x 4 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@ define <vscale x 4 x double> @intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@ define <vscale x 8 x double> @intrinsic_vfdiv_vv_nxv8f64_nxv8f64(<vscale x 8 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@ define <vscale x 8 x double> @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64(<vscale x
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@ define <vscale x 1 x half> @intrinsic_vfdiv_vf_nxv1f16_f16(<vscale x 1 x half> %
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@ define <vscale x 1 x half> @intrinsic_vfdiv_mask_vf_nxv1f16_f16(<vscale x 1 x ha
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@ define <vscale x 2 x half> @intrinsic_vfdiv_vf_nxv2f16_f16(<vscale x 2 x half> %
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@ define <vscale x 2 x half> @intrinsic_vfdiv_mask_vf_nxv2f16_f16(<vscale x 2 x ha
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@ define <vscale x 4 x half> @intrinsic_vfdiv_vf_nxv4f16_f16(<vscale x 4 x half> %
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@ define <vscale x 4 x half> @intrinsic_vfdiv_mask_vf_nxv4f16_f16(<vscale x 4 x ha
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@ define <vscale x 8 x half> @intrinsic_vfdiv_vf_nxv8f16_f16(<vscale x 8 x half> %
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@ define <vscale x 8 x half> @intrinsic_vfdiv_mask_vf_nxv8f16_f16(<vscale x 8 x ha
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@ define <vscale x 16 x half> @intrinsic_vfdiv_vf_nxv16f16_f16(<vscale x 16 x half
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@ define <vscale x 16 x half> @intrinsic_vfdiv_mask_vf_nxv16f16_f16(<vscale x 16 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@ define <vscale x 32 x half> @intrinsic_vfdiv_vf_nxv32f16_f16(<vscale x 32 x half
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@ define <vscale x 32 x half> @intrinsic_vfdiv_mask_vf_nxv32f16_f16(<vscale x 32 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@ define <vscale x 1 x float> @intrinsic_vfdiv_vf_nxv1f32_f32(<vscale x 1 x float>
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x float> @intrinsic_vfdiv_mask_vf_nxv1f32_f32(<vscale x 1 x f
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x float> @intrinsic_vfdiv_vf_nxv2f32_f32(<vscale x 2 x float>
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@ define <vscale x 2 x float> @intrinsic_vfdiv_mask_vf_nxv2f32_f32(<vscale x 2 x f
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 4 x float> @intrinsic_vfdiv_vf_nxv4f32_f32(<vscale x 4 x float>
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x float> @intrinsic_vfdiv_mask_vf_nxv4f32_f32(<vscale x 4 x f
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@ define <vscale x 8 x float> @intrinsic_vfdiv_vf_nxv8f32_f32(<vscale x 8 x float>
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@ define <vscale x 8 x float> @intrinsic_vfdiv_mask_vf_nxv8f32_f32(<vscale x 8 x f
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x float> @intrinsic_vfdiv_vf_nxv16f32_f32(<vscale x 16 x flo
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 16 x float> @intrinsic_vfdiv_mask_vf_nxv16f32_f32(<vscale x 16
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 1 x double> @intrinsic_vfdiv_vf_nxv1f64_f64(<vscale x 1 x doubl
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x double> @intrinsic_vfdiv_mask_vf_nxv1f64_f64(<vscale x 1 x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x double> @intrinsic_vfdiv_vf_nxv2f64_f64(<vscale x 2 x doubl
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1250,7 +1250,7 @@ define <vscale x 2 x double> @intrinsic_vfdiv_mask_vf_nxv2f64_f64(<vscale x 2 x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1273,7 +1273,7 @@ define <vscale x 4 x double> @intrinsic_vfdiv_vf_nxv4f64_f64(<vscale x 4 x doubl
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1296,7 +1296,7 @@ define <vscale x 4 x double> @intrinsic_vfdiv_mask_vf_nxv4f64_f64(<vscale x 4 x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1319,7 +1319,7 @@ define <vscale x 8 x double> @intrinsic_vfdiv_vf_nxv8f64_f64(<vscale x 8 x doubl
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1342,7 +1342,7 @@ define <vscale x 8 x double> @intrinsic_vfdiv_mask_vf_nxv8f64_f64(<vscale x 8 x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
index dfe2dd4fa847..88b6f0682423 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare i32 @llvm.riscv.vfirst.i32.nxv1i1(
   <vscale x 1 x i1>,
   i32);
@@ -10,7 +10,7 @@ define i32 @intrinsic_vfirst_m_i32_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.i32.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define i32 @intrinsic_vfirst_mask_m_i32_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -50,7 +50,7 @@ define i32 @intrinsic_vfirst_m_i32_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.i32.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -71,7 +71,7 @@ define i32 @intrinsic_vfirst_mask_m_i32_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -90,7 +90,7 @@ define i32 @intrinsic_vfirst_m_i32_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.i32.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -111,7 +111,7 @@ define i32 @intrinsic_vfirst_mask_m_i32_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -130,7 +130,7 @@ define i32 @intrinsic_vfirst_m_i32_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.i32.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -151,7 +151,7 @@ define i32 @intrinsic_vfirst_mask_m_i32_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -170,7 +170,7 @@ define i32 @intrinsic_vfirst_m_i32_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwi
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.i32.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -191,7 +191,7 @@ define i32 @intrinsic_vfirst_mask_m_i32_nxv16i1(<vscale x 16 x i1> %0, <vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -210,7 +210,7 @@ define i32 @intrinsic_vfirst_m_i32_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwi
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.i32.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -231,7 +231,7 @@ define i32 @intrinsic_vfirst_mask_m_i32_nxv32i1(<vscale x 32 x i1> %0, <vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -250,7 +250,7 @@ define i32 @intrinsic_vfirst_m_i32_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwi
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.i32.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -271,7 +271,7 @@ define i32 @intrinsic_vfirst_mask_m_i32_nxv64i1(<vscale x 64 x i1> %0, <vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll
index 08526b83331f..d5edf79f0eeb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare i64 @llvm.riscv.vfirst.i64.nxv1i1(
   <vscale x 1 x i1>,
   i64);
@@ -10,7 +10,7 @@ define i64 @intrinsic_vfirst_m_i64_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.i64.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define i64 @intrinsic_vfirst_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -50,7 +50,7 @@ define i64 @intrinsic_vfirst_m_i64_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.i64.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -71,7 +71,7 @@ define i64 @intrinsic_vfirst_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -90,7 +90,7 @@ define i64 @intrinsic_vfirst_m_i64_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.i64.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -111,7 +111,7 @@ define i64 @intrinsic_vfirst_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -130,7 +130,7 @@ define i64 @intrinsic_vfirst_m_i64_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.i64.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -151,7 +151,7 @@ define i64 @intrinsic_vfirst_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -170,7 +170,7 @@ define i64 @intrinsic_vfirst_m_i64_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwi
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.i64.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -191,7 +191,7 @@ define i64 @intrinsic_vfirst_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -210,7 +210,7 @@ define i64 @intrinsic_vfirst_m_i64_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwi
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.i64.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -231,7 +231,7 @@ define i64 @intrinsic_vfirst_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -250,7 +250,7 @@ define i64 @intrinsic_vfirst_m_i64_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwi
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.i64.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -271,7 +271,7 @@ define i64 @intrinsic_vfirst_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
index cce88c59e255..57e274affcd5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half>  @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half>  @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half>  @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half>  @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half>  @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half>  @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half>  @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half>  @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half>  @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x float>  @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x float>  @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float>  @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float>  @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x float>  @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x float>  @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float>  @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x float>  @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double>  @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double>  @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x double>  @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x double>  @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x double>  @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x double>  @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 1 x half>  @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16(<vscale x 1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@ define <vscale x 1 x half> @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@ define <vscale x 2 x half>  @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16(<vscale x 2
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@ define <vscale x 2 x half> @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x half>  @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16(<vscale x 4
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@ define <vscale x 4 x half> @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ define <vscale x 8 x half>  @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16(<vscale x 8
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@ define <vscale x 8 x half> @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@ define <vscale x 16 x half>  @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x half> @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x float>  @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@ define <vscale x 1 x float> @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@ define <vscale x 2 x float>  @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@ define <vscale x 2 x float> @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@ define <vscale x 4 x float>  @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@ define <vscale x 4 x float> @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@ define <vscale x 8 x float>  @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x float> @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 1 x double>  @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1029,7 +1029,7 @@ define <vscale x 1 x double> @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1057,7 +1057,7 @@ define <vscale x 2 x double>  @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 2 x double> @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 4 x double>  @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 4 x double> @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
index 0c62230c48ba..55476cacce7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half>  @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half>  @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half>  @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half>  @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half>  @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half>  @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half>  @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half>  @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half>  @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x float>  @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x float>  @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float>  @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float>  @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x float>  @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x float>  @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float>  @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x float>  @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double>  @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double>  @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x double>  @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x double>  @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x double>  @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x double>  @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 1 x half>  @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16(<vscale x 1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@ define <vscale x 1 x half> @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@ define <vscale x 2 x half>  @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16(<vscale x 2
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@ define <vscale x 2 x half> @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x half>  @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16(<vscale x 4
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@ define <vscale x 4 x half> @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ define <vscale x 8 x half>  @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16(<vscale x 8
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@ define <vscale x 8 x half> @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@ define <vscale x 16 x half>  @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x half> @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x float>  @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@ define <vscale x 1 x float> @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@ define <vscale x 2 x float>  @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@ define <vscale x 2 x float> @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@ define <vscale x 4 x float>  @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@ define <vscale x 4 x float> @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@ define <vscale x 8 x float>  @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x float> @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 1 x double>  @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64(<vscale x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1021,7 +1021,7 @@ define <vscale x 1 x double> @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64(<vsca
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1045,7 +1045,7 @@ define <vscale x 2 x double>  @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64(<vscale x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 2 x double> @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64(<vsca
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x double>  @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64(<vscale x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1117,7 +1117,7 @@ define <vscale x 4 x double> @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64(<vsca
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
index 61f3f9332adb..50b335668078 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half>  @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half>  @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half>  @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half>  @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half>  @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half>  @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half>  @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half>  @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half>  @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x float>  @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x float>  @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float>  @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float>  @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x float>  @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x float>  @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float>  @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x float>  @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double>  @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double>  @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x double>  @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x double>  @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x double>  @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x double>  @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 1 x half>  @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@ define <vscale x 1 x half> @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@ define <vscale x 2 x half>  @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@ define <vscale x 2 x half> @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x half>  @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@ define <vscale x 4 x half> @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ define <vscale x 8 x half>  @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@ define <vscale x 8 x half> @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@ define <vscale x 16 x half>  @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x half> @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x float>  @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@ define <vscale x 1 x float> @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@ define <vscale x 2 x float>  @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@ define <vscale x 2 x float> @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@ define <vscale x 4 x float>  @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@ define <vscale x 4 x float> @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@ define <vscale x 8 x float>  @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x float> @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 1 x double>  @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1029,7 +1029,7 @@ define <vscale x 1 x double> @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1057,7 +1057,7 @@ define <vscale x 2 x double>  @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 2 x double> @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 4 x double>  @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 4 x double> @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
index 3c6c54031d02..88f5c38698c1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half>  @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half>  @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half>  @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half>  @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half>  @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half>  @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half>  @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half>  @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half>  @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x float>  @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x float>  @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float>  @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float>  @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x float>  @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x float>  @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float>  @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x float>  @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double>  @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double>  @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x double>  @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x double>  @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x double>  @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x double>  @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 1 x half>  @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@ define <vscale x 1 x half> @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@ define <vscale x 2 x half>  @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@ define <vscale x 2 x half> @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x half>  @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@ define <vscale x 4 x half> @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ define <vscale x 8 x half>  @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@ define <vscale x 8 x half> @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@ define <vscale x 16 x half>  @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x half> @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x float>  @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@ define <vscale x 1 x float> @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@ define <vscale x 2 x float>  @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@ define <vscale x 2 x float> @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@ define <vscale x 4 x float>  @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@ define <vscale x 4 x float> @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@ define <vscale x 8 x float>  @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x float> @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 1 x double>  @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64(<vscale x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1021,7 +1021,7 @@ define <vscale x 1 x double> @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64(<vsca
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1045,7 +1045,7 @@ define <vscale x 2 x double>  @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64(<vscale x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 2 x double> @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64(<vsca
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x double>  @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64(<vscale x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1117,7 +1117,7 @@ define <vscale x 4 x double> @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64(<vsca
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
index 967c7351e272..0140b732f0bf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x half> @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x half> @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x half> @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x half> @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x half> @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x half> @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@ define <vscale x 32 x half> @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x float> @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x float> @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x float> @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@ define <vscale x 4 x float> @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@ define <vscale x 4 x float> @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x float> @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x float> @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@ define <vscale x 16 x float> @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 16 x float> @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@ define <vscale x 1 x double> @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x double> @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@ define <vscale x 2 x double> @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 4 x double> @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@ define <vscale x 4 x double> @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@ define <vscale x 8 x double> @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@ define <vscale x 8 x double> @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64(<v
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@ define <vscale x 1 x half> @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@ define <vscale x 1 x half> @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@ define <vscale x 2 x half> @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@ define <vscale x 2 x half> @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@ define <vscale x 4 x half> @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@ define <vscale x 4 x half> @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@ define <vscale x 8 x half> @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@ define <vscale x 8 x half> @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@ define <vscale x 16 x half> @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@ define <vscale x 16 x half> @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@ define <vscale x 32 x half> @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@ define <vscale x 32 x half> @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@ define <vscale x 1 x float> @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32(<vscale x 1
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x float> @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x float> @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32(<vscale x 2
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@ define <vscale x 2 x float> @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 4 x float> @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32(<vscale x 4
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x float> @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@ define <vscale x 8 x float> @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32(<vscale x 8
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@ define <vscale x 8 x float> @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x float> @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 16 x float> @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 1 x double> @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@ define <vscale x 1 x double> @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x double> @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@ define <vscale x 2 x double> @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@ define <vscale x 4 x double> @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@ define <vscale x 4 x double> @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@ define <vscale x 8 x double> @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@ define <vscale x 8 x double> @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
index d6f60fdb4799..0af1d1a359ba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x half> @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x half> @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x half> @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x half> @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x half> @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x half> @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@ define <vscale x 32 x half> @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x float> @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x float> @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x float> @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@ define <vscale x 4 x float> @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@ define <vscale x 4 x float> @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x float> @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x float> @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@ define <vscale x 16 x float> @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 16 x float> @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@ define <vscale x 1 x double> @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x double> @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@ define <vscale x 2 x double> @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 4 x double> @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@ define <vscale x 4 x double> @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@ define <vscale x 8 x double> @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@ define <vscale x 8 x double> @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64(<v
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@ define <vscale x 1 x half> @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@ define <vscale x 1 x half> @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@ define <vscale x 2 x half> @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@ define <vscale x 2 x half> @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@ define <vscale x 4 x half> @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@ define <vscale x 4 x half> @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@ define <vscale x 8 x half> @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@ define <vscale x 8 x half> @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@ define <vscale x 16 x half> @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@ define <vscale x 16 x half> @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@ define <vscale x 32 x half> @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@ define <vscale x 32 x half> @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@ define <vscale x 1 x float> @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32(<vscale x 1
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x float> @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x float> @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32(<vscale x 2
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@ define <vscale x 2 x float> @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 4 x float> @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32(<vscale x 4
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x float> @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@ define <vscale x 8 x float> @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32(<vscale x 8
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@ define <vscale x 8 x float> @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x float> @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 16 x float> @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 1 x double> @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64(<vscale x 1
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x double> @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64(<vscal
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x double> @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64(<vscale x 2
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1250,7 +1250,7 @@ define <vscale x 2 x double> @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64(<vscal
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1273,7 +1273,7 @@ define <vscale x 4 x double> @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64(<vscale x 4
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1296,7 +1296,7 @@ define <vscale x 4 x double> @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64(<vscal
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1319,7 +1319,7 @@ define <vscale x 8 x double> @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64(<vscale x 8
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1342,7 +1342,7 @@ define <vscale x 8 x double> @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64(<vscal
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll
index ade80c381afa..4b1dbb594155 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half> @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half> @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x half> @intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -80,7 +80,7 @@ define <vscale x 2 x half> @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x half> @intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -125,7 +125,7 @@ define <vscale x 4 x half> @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x half> @intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -170,7 +170,7 @@ define <vscale x 8 x half> @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x half> @intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -215,7 +215,7 @@ define <vscale x 16 x half> @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x half> @intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -260,7 +260,7 @@ define <vscale x 32 x half> @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x float> @intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -305,7 +305,7 @@ define <vscale x 1 x float> @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x float> @intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -350,7 +350,7 @@ define <vscale x 2 x float> @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x float> @intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -395,7 +395,7 @@ define <vscale x 4 x float> @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x float> @intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -440,7 +440,7 @@ define <vscale x 8 x float> @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x float> @intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -485,7 +485,7 @@ define <vscale x 16 x float> @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x double> @intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -534,7 +534,7 @@ define <vscale x 1 x double> @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -556,7 +556,7 @@ define <vscale x 2 x double> @intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -583,7 +583,7 @@ define <vscale x 2 x double> @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -605,7 +605,7 @@ define <vscale x 4 x double> @intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -632,7 +632,7 @@ define <vscale x 4 x double> @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -654,7 +654,7 @@ define <vscale x 8 x double> @intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -681,7 +681,7 @@ define <vscale x 8 x double> @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -697,7 +697,7 @@ define <vscale x 1 x half> @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -713,7 +713,7 @@ define <vscale x 2 x half> @intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -729,7 +729,7 @@ define <vscale x 4 x half> @intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -745,7 +745,7 @@ define <vscale x 8 x half> @intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x half> @intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -777,7 +777,7 @@ define <vscale x 32 x half> @intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -793,7 +793,7 @@ define <vscale x 1 x float> @intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -809,7 +809,7 @@ define <vscale x 2 x float> @intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -825,7 +825,7 @@ define <vscale x 4 x float> @intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -841,7 +841,7 @@ define <vscale x 8 x float> @intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -857,7 +857,7 @@ define <vscale x 16 x float> @intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -873,7 +873,7 @@ define <vscale x 1 x double> @intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -889,7 +889,7 @@ define <vscale x 2 x double> @intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -905,7 +905,7 @@ define <vscale x 4 x double> @intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -921,7 +921,7 @@ define <vscale x 8 x double> @intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll
index ce80e8baf763..042b8e36e6f9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half> @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half> @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x half> @intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -80,7 +80,7 @@ define <vscale x 2 x half> @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x half> @intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -125,7 +125,7 @@ define <vscale x 4 x half> @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x half> @intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -170,7 +170,7 @@ define <vscale x 8 x half> @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x half> @intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -215,7 +215,7 @@ define <vscale x 16 x half> @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x half> @intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -260,7 +260,7 @@ define <vscale x 32 x half> @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x float> @intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -305,7 +305,7 @@ define <vscale x 1 x float> @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x float> @intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -350,7 +350,7 @@ define <vscale x 2 x float> @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x float> @intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -395,7 +395,7 @@ define <vscale x 4 x float> @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x float> @intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -440,7 +440,7 @@ define <vscale x 8 x float> @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x float> @intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -485,7 +485,7 @@ define <vscale x 16 x float> @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x double> @intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -530,7 +530,7 @@ define <vscale x 1 x double> @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64(<vscale
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x double> @intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -575,7 +575,7 @@ define <vscale x 2 x double> @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64(<vscale
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x double> @intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -620,7 +620,7 @@ define <vscale x 4 x double> @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64(<vscale
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x double> @intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -665,7 +665,7 @@ define <vscale x 8 x double> @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64(<vscale
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -681,7 +681,7 @@ define <vscale x 1 x half> @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -697,7 +697,7 @@ define <vscale x 2 x half> @intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x half> @intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -729,7 +729,7 @@ define <vscale x 8 x half> @intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -745,7 +745,7 @@ define <vscale x 16 x half> @intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -761,7 +761,7 @@ define <vscale x 32 x half> @intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -777,7 +777,7 @@ define <vscale x 1 x float> @intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -793,7 +793,7 @@ define <vscale x 2 x float> @intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -809,7 +809,7 @@ define <vscale x 4 x float> @intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -825,7 +825,7 @@ define <vscale x 8 x float> @intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -841,7 +841,7 @@ define <vscale x 16 x float> @intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -857,7 +857,7 @@ define <vscale x 1 x double> @intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -873,7 +873,7 @@ define <vscale x 2 x double> @intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -889,7 +889,7 @@ define <vscale x 4 x double> @intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -905,7 +905,7 @@ define <vscale x 8 x double> @intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
index 6e75e42d7937..376b95a327ea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x half> @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x half> @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x half> @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x half> @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x half> @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x half> @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@ define <vscale x 32 x half> @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x float> @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x float> @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x float> @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@ define <vscale x 4 x float> @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@ define <vscale x 4 x float> @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x float> @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x float> @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@ define <vscale x 16 x float> @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 16 x float> @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@ define <vscale x 1 x double> @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x double> @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@ define <vscale x 2 x double> @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 4 x double> @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@ define <vscale x 4 x double> @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@ define <vscale x 8 x double> @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@ define <vscale x 8 x double> @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64(<v
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@ define <vscale x 1 x half> @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@ define <vscale x 1 x half> @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@ define <vscale x 2 x half> @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@ define <vscale x 2 x half> @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@ define <vscale x 4 x half> @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@ define <vscale x 4 x half> @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@ define <vscale x 8 x half> @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@ define <vscale x 8 x half> @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@ define <vscale x 16 x half> @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@ define <vscale x 16 x half> @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@ define <vscale x 32 x half> @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@ define <vscale x 32 x half> @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@ define <vscale x 1 x float> @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32(<vscale x 1
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x float> @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x float> @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32(<vscale x 2
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@ define <vscale x 2 x float> @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 4 x float> @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32(<vscale x 4
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x float> @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@ define <vscale x 8 x float> @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32(<vscale x 8
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@ define <vscale x 8 x float> @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x float> @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 16 x float> @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 1 x double> @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@ define <vscale x 1 x double> @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x double> @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@ define <vscale x 2 x double> @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@ define <vscale x 4 x double> @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@ define <vscale x 4 x double> @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@ define <vscale x 8 x double> @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@ define <vscale x 8 x double> @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
index 4365034c7c7f..a64f8907dfa0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x half> @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x half> @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x half> @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x half> @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x half> @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x half> @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@ define <vscale x 32 x half> @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x float> @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x float> @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x float> @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@ define <vscale x 4 x float> @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@ define <vscale x 4 x float> @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x float> @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x float> @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@ define <vscale x 16 x float> @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 16 x float> @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@ define <vscale x 1 x double> @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x double> @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@ define <vscale x 2 x double> @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 4 x double> @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@ define <vscale x 4 x double> @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@ define <vscale x 8 x double> @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@ define <vscale x 8 x double> @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64(<v
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@ define <vscale x 1 x half> @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@ define <vscale x 1 x half> @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@ define <vscale x 2 x half> @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@ define <vscale x 2 x half> @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@ define <vscale x 4 x half> @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@ define <vscale x 4 x half> @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@ define <vscale x 8 x half> @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@ define <vscale x 8 x half> @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@ define <vscale x 16 x half> @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@ define <vscale x 16 x half> @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@ define <vscale x 32 x half> @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@ define <vscale x 32 x half> @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@ define <vscale x 1 x float> @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32(<vscale x 1
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x float> @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x float> @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32(<vscale x 2
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@ define <vscale x 2 x float> @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 4 x float> @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32(<vscale x 4
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x float> @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@ define <vscale x 8 x float> @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32(<vscale x 8
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@ define <vscale x 8 x float> @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x float> @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 16 x float> @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 1 x double> @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64(<vscale x 1
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x double> @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64(<vscal
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x double> @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64(<vscale x 2
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1250,7 +1250,7 @@ define <vscale x 2 x double> @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64(<vscal
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1273,7 +1273,7 @@ define <vscale x 4 x double> @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64(<vscale x 4
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1296,7 +1296,7 @@ define <vscale x 4 x double> @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64(<vscal
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1319,7 +1319,7 @@ define <vscale x 8 x double> @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64(<vscale x 8
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1342,7 +1342,7 @@ define <vscale x 8 x double> @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64(<vscal
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
index c7be66b28b34..43788ff93bb3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half>  @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half>  @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half>  @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half>  @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half>  @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half>  @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half>  @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half>  @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x float>  @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x float>  @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float>  @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float>  @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x float>  @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x float>  @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double>  @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double>  @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x double>  @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x double>  @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 1 x half>  @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@ define <vscale x 1 x half> @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@ define <vscale x 2 x half>  @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@ define <vscale x 2 x half> @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x half>  @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@ define <vscale x 4 x half> @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ define <vscale x 8 x half>  @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@ define <vscale x 8 x half> @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x half> @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x float>  @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@ define <vscale x 1 x float> @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@ define <vscale x 2 x float>  @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@ define <vscale x 2 x float> @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@ define <vscale x 4 x float>  @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@ define <vscale x 4 x float> @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x float> @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 1 x double>  @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1029,7 +1029,7 @@ define <vscale x 1 x double> @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1057,7 +1057,7 @@ define <vscale x 2 x double>  @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 2 x double> @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 4 x double> @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
index e138e18df39c..b2ad8d3b1f71 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half>  @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half>  @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half>  @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half>  @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half>  @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half>  @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half>  @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half>  @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x float>  @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x float>  @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float>  @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float>  @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x float>  @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x float>  @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double>  @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double>  @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x double>  @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x double>  @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 1 x half>  @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@ define <vscale x 1 x half> @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@ define <vscale x 2 x half>  @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@ define <vscale x 2 x half> @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x half>  @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@ define <vscale x 4 x half> @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ define <vscale x 8 x half>  @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@ define <vscale x 8 x half> @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x half> @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x float>  @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@ define <vscale x 1 x float> @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@ define <vscale x 2 x float>  @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@ define <vscale x 2 x float> @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@ define <vscale x 4 x float>  @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@ define <vscale x 4 x float> @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x float> @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 1 x double>  @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64(<vscale x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1021,7 +1021,7 @@ define <vscale x 1 x double> @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64(<vsca
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1045,7 +1045,7 @@ define <vscale x 2 x double>  @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64(<vscale x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 2 x double> @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64(<vsca
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64(<vscale x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1117,7 +1117,7 @@ define <vscale x 4 x double> @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64(<vsca
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
index 333fccedf9ed..732e4107815b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half>  @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half>  @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half>  @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half>  @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half>  @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half>  @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half>  @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half>  @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x float>  @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x float>  @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float>  @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float>  @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x float>  @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x float>  @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double>  @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double>  @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x double>  @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x double>  @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 1 x half>  @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@ define <vscale x 1 x half> @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@ define <vscale x 2 x half>  @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@ define <vscale x 2 x half> @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x half>  @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@ define <vscale x 4 x half> @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ define <vscale x 8 x half>  @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@ define <vscale x 8 x half> @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x half> @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x float>  @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@ define <vscale x 1 x float> @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@ define <vscale x 2 x float>  @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@ define <vscale x 2 x float> @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@ define <vscale x 4 x float>  @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@ define <vscale x 4 x float> @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x float> @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 1 x double>  @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1029,7 +1029,7 @@ define <vscale x 1 x double> @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1057,7 +1057,7 @@ define <vscale x 2 x double>  @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 2 x double> @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 4 x double> @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
index 3badef2584bd..05ceede21b8f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half>  @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half>  @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half>  @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half>  @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half>  @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half>  @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half>  @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half>  @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x float>  @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x float>  @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float>  @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float>  @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x float>  @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x float>  @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double>  @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double>  @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x double>  @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x double>  @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 1 x half>  @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@ define <vscale x 1 x half> @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@ define <vscale x 2 x half>  @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@ define <vscale x 2 x half> @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x half>  @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@ define <vscale x 4 x half> @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ define <vscale x 8 x half>  @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@ define <vscale x 8 x half> @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@ define <vscale x 16 x half>  @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x half> @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x float>  @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@ define <vscale x 1 x float> @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@ define <vscale x 2 x float>  @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@ define <vscale x 2 x float> @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@ define <vscale x 4 x float>  @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@ define <vscale x 4 x float> @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@ define <vscale x 8 x float>  @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x float> @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 1 x double>  @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64(<vscale x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1021,7 +1021,7 @@ define <vscale x 1 x double> @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64(<vsca
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1045,7 +1045,7 @@ define <vscale x 2 x double>  @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64(<vscale x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 2 x double> @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64(<vsca
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x double>  @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64(<vscale x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1117,7 +1117,7 @@ define <vscale x 4 x double> @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64(<vsca
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
index 1a8521f872f1..574e49d9febc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x half> @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x half> @intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x half> @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x half> @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x half> @intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x half> @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@ define <vscale x 32 x half> @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x float> @intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x float> @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x float> @intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@ define <vscale x 4 x float> @intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@ define <vscale x 4 x float> @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x float> @intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x float> @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@ define <vscale x 16 x float> @intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 16 x float> @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@ define <vscale x 1 x double> @intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x double> @intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@ define <vscale x 2 x double> @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 4 x double> @intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@ define <vscale x 4 x double> @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@ define <vscale x 8 x double> @intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@ define <vscale x 8 x double> @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64(<v
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@ define <vscale x 1 x half> @intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@ define <vscale x 1 x half> @intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@ define <vscale x 2 x half> @intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@ define <vscale x 2 x half> @intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@ define <vscale x 4 x half> @intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@ define <vscale x 4 x half> @intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@ define <vscale x 8 x half> @intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@ define <vscale x 8 x half> @intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@ define <vscale x 16 x half> @intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@ define <vscale x 16 x half> @intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@ define <vscale x 32 x half> @intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@ define <vscale x 32 x half> @intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@ define <vscale x 1 x float> @intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32(<vscale x 1
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x float> @intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x float> @intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32(<vscale x 2
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@ define <vscale x 2 x float> @intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 4 x float> @intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32(<vscale x 4
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x float> @intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@ define <vscale x 8 x float> @intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32(<vscale x 8
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@ define <vscale x 8 x float> @intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x float> @intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 16 x float> @intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 1 x double> @intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@ define <vscale x 1 x double> @intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x double> @intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@ define <vscale x 2 x double> @intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@ define <vscale x 4 x double> @intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@ define <vscale x 4 x double> @intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@ define <vscale x 8 x double> @intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@ define <vscale x 8 x double> @intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
index b285e36b3f85..4f514ad80139 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16(<vscale x 1 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @intrinsic_vfmul_vv_nxv2f16_nxv2f16(<vscale x 2 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x half> @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x half> @intrinsic_vfmul_vv_nxv4f16_nxv4f16(<vscale x 4 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x half> @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfmul_vv_nxv8f16_nxv8f16(<vscale x 8 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x half> @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x half> @intrinsic_vfmul_vv_nxv16f16_nxv16f16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x half> @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfmul_vv_nxv32f16_nxv32f16(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@ define <vscale x 32 x half> @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16(<vscale x
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x float> @intrinsic_vfmul_vv_nxv1f32_nxv1f32(<vscale x 1 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x float> @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x float> @intrinsic_vfmul_vv_nxv2f32_nxv2f32(<vscale x 2 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@ define <vscale x 4 x float> @intrinsic_vfmul_vv_nxv4f32_nxv4f32(<vscale x 4 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@ define <vscale x 4 x float> @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x float> @intrinsic_vfmul_vv_nxv8f32_nxv8f32(<vscale x 8 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x float> @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@ define <vscale x 16 x float> @intrinsic_vfmul_vv_nxv16f32_nxv16f32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 16 x float> @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32(<vscale
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@ define <vscale x 1 x double> @intrinsic_vfmul_vv_nxv1f64_nxv1f64(<vscale x 1 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x double> @intrinsic_vfmul_vv_nxv2f64_nxv2f64(<vscale x 2 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@ define <vscale x 2 x double> @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 4 x double> @intrinsic_vfmul_vv_nxv4f64_nxv4f64(<vscale x 4 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@ define <vscale x 4 x double> @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@ define <vscale x 8 x double> @intrinsic_vfmul_vv_nxv8f64_nxv8f64(<vscale x 8 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@ define <vscale x 8 x double> @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64(<vscale x
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@ define <vscale x 1 x half> @intrinsic_vfmul_vf_nxv1f16_f16(<vscale x 1 x half> %
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@ define <vscale x 1 x half> @intrinsic_vfmul_mask_vf_nxv1f16_f16(<vscale x 1 x ha
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@ define <vscale x 2 x half> @intrinsic_vfmul_vf_nxv2f16_f16(<vscale x 2 x half> %
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@ define <vscale x 2 x half> @intrinsic_vfmul_mask_vf_nxv2f16_f16(<vscale x 2 x ha
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@ define <vscale x 4 x half> @intrinsic_vfmul_vf_nxv4f16_f16(<vscale x 4 x half> %
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@ define <vscale x 4 x half> @intrinsic_vfmul_mask_vf_nxv4f16_f16(<vscale x 4 x ha
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@ define <vscale x 8 x half> @intrinsic_vfmul_vf_nxv8f16_f16(<vscale x 8 x half> %
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@ define <vscale x 8 x half> @intrinsic_vfmul_mask_vf_nxv8f16_f16(<vscale x 8 x ha
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@ define <vscale x 16 x half> @intrinsic_vfmul_vf_nxv16f16_f16(<vscale x 16 x half
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@ define <vscale x 16 x half> @intrinsic_vfmul_mask_vf_nxv16f16_f16(<vscale x 16 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@ define <vscale x 32 x half> @intrinsic_vfmul_vf_nxv32f16_f16(<vscale x 32 x half
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@ define <vscale x 32 x half> @intrinsic_vfmul_mask_vf_nxv32f16_f16(<vscale x 32 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@ define <vscale x 1 x float> @intrinsic_vfmul_vf_nxv1f32_f32(<vscale x 1 x float>
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x float> @intrinsic_vfmul_mask_vf_nxv1f32_f32(<vscale x 1 x f
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x float> @intrinsic_vfmul_vf_nxv2f32_f32(<vscale x 2 x float>
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@ define <vscale x 2 x float> @intrinsic_vfmul_mask_vf_nxv2f32_f32(<vscale x 2 x f
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 4 x float> @intrinsic_vfmul_vf_nxv4f32_f32(<vscale x 4 x float>
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x float> @intrinsic_vfmul_mask_vf_nxv4f32_f32(<vscale x 4 x f
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@ define <vscale x 8 x float> @intrinsic_vfmul_vf_nxv8f32_f32(<vscale x 8 x float>
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@ define <vscale x 8 x float> @intrinsic_vfmul_mask_vf_nxv8f32_f32(<vscale x 8 x f
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x float> @intrinsic_vfmul_vf_nxv16f32_f32(<vscale x 16 x flo
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 16 x float> @intrinsic_vfmul_mask_vf_nxv16f32_f32(<vscale x 16
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 1 x double> @intrinsic_vfmul_vf_nxv1f64_f64(<vscale x 1 x doubl
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x double> @intrinsic_vfmul_mask_vf_nxv1f64_f64(<vscale x 1 x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x double> @intrinsic_vfmul_vf_nxv2f64_f64(<vscale x 2 x doubl
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1250,7 +1250,7 @@ define <vscale x 2 x double> @intrinsic_vfmul_mask_vf_nxv2f64_f64(<vscale x 2 x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1273,7 +1273,7 @@ define <vscale x 4 x double> @intrinsic_vfmul_vf_nxv4f64_f64(<vscale x 4 x doubl
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1296,7 +1296,7 @@ define <vscale x 4 x double> @intrinsic_vfmul_mask_vf_nxv4f64_f64(<vscale x 4 x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1319,7 +1319,7 @@ define <vscale x 8 x double> @intrinsic_vfmul_vf_nxv8f64_f64(<vscale x 8 x doubl
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1342,7 +1342,7 @@ define <vscale x 8 x double> @intrinsic_vfmul_mask_vf_nxv8f64_f64(<vscale x 8 x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
index c6567d2a751c..f91b0bdef680 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -target-abi ilp32d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
   half,
   i32);
@@ -10,7 +10,7 @@ define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i32 %1) nounwi
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half %0,
@@ -28,7 +28,7 @@ define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i32 %1) nounwi
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half %0,
@@ -46,7 +46,7 @@ define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i32 %1) nounwi
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half %0,
@@ -64,7 +64,7 @@ define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i32 %1) nounwi
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half %0,
@@ -82,7 +82,7 @@ define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i32 %1) noun
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half %0,
@@ -100,7 +100,7 @@ define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i32 %1) noun
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half %0,
@@ -118,7 +118,7 @@ define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32(float %0, i32 %1) noun
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float %0,
@@ -136,7 +136,7 @@ define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32(float %0, i32 %1) noun
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float %0,
@@ -154,7 +154,7 @@ define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32(float %0, i32 %1) noun
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float %0,
@@ -172,7 +172,7 @@ define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32(float %0, i32 %1) noun
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32(float %0, i32 %1) no
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float %0,
@@ -208,7 +208,7 @@ define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64(double %0, i32 %1) no
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double %0,
@@ -226,7 +226,7 @@ define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64(double %0, i32 %1) no
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double %0,
@@ -244,7 +244,7 @@ define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64(double %0, i32 %1) no
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double %0,
@@ -262,7 +262,7 @@ define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64(double %0, i32 %1) no
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half 0.0,
@@ -290,7 +290,7 @@ define <vscale x 2 x half> @intrinsic_vmv.v.i_zero_nxv2f16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half 0.0,
@@ -304,7 +304,7 @@ define <vscale x 4 x half> @intrinsic_vmv.v.i_zero_nxv4f16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half 0.0,
@@ -318,7 +318,7 @@ define <vscale x 8 x half> @intrinsic_vmv.v.i_zero_nxv8f16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half 0.0,
@@ -332,7 +332,7 @@ define <vscale x 16 x half> @intrinsic_vmv.v.i_zero_nxv16f16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half 0.0,
@@ -346,7 +346,7 @@ define <vscale x 32 x half> @intrinsic_vmv.v.i_zero_nxv32f16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half 0.0,
@@ -360,7 +360,7 @@ define <vscale x 1 x float> @intrinsic_vmv.v.i_zero_nxv1f32(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float 0.0,
@@ -374,7 +374,7 @@ define <vscale x 2 x float> @intrinsic_vmv.v.i_zero_nxv2f32(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float 0.0,
@@ -388,7 +388,7 @@ define <vscale x 4 x float> @intrinsic_vmv.v.i_zero_nxv4f32(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float 0.0,
@@ -402,7 +402,7 @@ define <vscale x 8 x float> @intrinsic_vmv.v.i_zero_nxv8f32(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float 0.0,
@@ -416,7 +416,7 @@ define <vscale x 16 x float> @intrinsic_vmv.v.i_zero_nxv16f32(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float 0.0,
@@ -430,7 +430,7 @@ define <vscale x 1 x double> @intrinsic_vmv.v.i_zero_nxv1f64(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double 0.0,
@@ -444,7 +444,7 @@ define <vscale x 2 x double> @intrinsic_vmv.v.i_zero_nxv2f64(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double 0.0,
@@ -458,7 +458,7 @@ define <vscale x 4 x double> @intrinsic_vmv.v.i_zero_nxv4f64(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double 0.0,
@@ -472,7 +472,7 @@ define <vscale x 8 x double> @intrinsic_vmv.v.i_zero_nxv8f64(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double 0.0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
index 19df84434616..17e393bd15f5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -target-abi lp64d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
   half,
   i64);
@@ -10,7 +10,7 @@ define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i64 %1) nounwi
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half %0,
@@ -28,7 +28,7 @@ define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i64 %1) nounwi
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half %0,
@@ -46,7 +46,7 @@ define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i64 %1) nounwi
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half %0,
@@ -64,7 +64,7 @@ define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i64 %1) nounwi
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half %0,
@@ -82,7 +82,7 @@ define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i64 %1) noun
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half %0,
@@ -100,7 +100,7 @@ define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i64 %1) noun
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half %0,
@@ -118,7 +118,7 @@ define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32(float %0, i64 %1) noun
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float %0,
@@ -136,7 +136,7 @@ define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32(float %0, i64 %1) noun
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float %0,
@@ -154,7 +154,7 @@ define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32(float %0, i64 %1) noun
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float %0,
@@ -172,7 +172,7 @@ define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32(float %0, i64 %1) noun
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32(float %0, i64 %1) no
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float %0,
@@ -208,7 +208,7 @@ define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64(double %0, i64 %1) no
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double %0,
@@ -226,7 +226,7 @@ define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64(double %0, i64 %1) no
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double %0,
@@ -244,7 +244,7 @@ define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64(double %0, i64 %1) no
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double %0,
@@ -262,7 +262,7 @@ define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64(double %0, i64 %1) no
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half 0.0,
@@ -290,7 +290,7 @@ define <vscale x 2 x half> @intrinsic_vmv.v.i_zero_nxv2f16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half 0.0,
@@ -304,7 +304,7 @@ define <vscale x 4 x half> @intrinsic_vmv.v.i_zero_nxv4f16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half 0.0,
@@ -318,7 +318,7 @@ define <vscale x 8 x half> @intrinsic_vmv.v.i_zero_nxv8f16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half 0.0,
@@ -332,7 +332,7 @@ define <vscale x 16 x half> @intrinsic_vmv.v.i_zero_nxv16f16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half 0.0,
@@ -346,7 +346,7 @@ define <vscale x 32 x half> @intrinsic_vmv.v.i_zero_nxv32f16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half 0.0,
@@ -360,7 +360,7 @@ define <vscale x 1 x float> @intrinsic_vmv.v.i_zero_nxv1f32(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float 0.0,
@@ -374,7 +374,7 @@ define <vscale x 2 x float> @intrinsic_vmv.v.i_zero_nxv2f32(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float 0.0,
@@ -388,7 +388,7 @@ define <vscale x 4 x float> @intrinsic_vmv.v.i_zero_nxv4f32(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float 0.0,
@@ -402,7 +402,7 @@ define <vscale x 8 x float> @intrinsic_vmv.v.i_zero_nxv8f32(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float 0.0,
@@ -416,7 +416,7 @@ define <vscale x 16 x float> @intrinsic_vmv.v.i_zero_nxv16f32(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float 0.0,
@@ -430,7 +430,7 @@ define <vscale x 1 x double> @intrinsic_vmv.v.i_zero_nxv1f64(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double 0.0,
@@ -444,7 +444,7 @@ define <vscale x 2 x double> @intrinsic_vmv.v.i_zero_nxv2f64(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double 0.0,
@@ -458,7 +458,7 @@ define <vscale x 4 x double> @intrinsic_vmv.v.i_zero_nxv4f64(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double 0.0,
@@ -472,7 +472,7 @@ define <vscale x 8 x double> @intrinsic_vmv.v.i_zero_nxv8f64(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double 0.0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll
index ec49899b6fa8..54a464ed7844 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
   <vscale x 1 x float>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64(
     <vscale x 1 x float> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64(
     <vscale x 2 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64(
     <vscale x 4 x float> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64(
     <vscale x 8 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll
index 62d3433ab7b8..f8edd416d9f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
   <vscale x 1 x float>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64(
     <vscale x 1 x float> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64(
     <vscale x 2 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64(
     <vscale x 4 x float> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64(
     <vscale x 8 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll
index cdc4494eb6a5..ed6ec6f217f2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
   <vscale x 1 x i32>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll
index 0ae8e52e2ce5..680d2145aad3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
   <vscale x 1 x i32>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll
index ae6326f4302c..5890c7dcb290 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
   <vscale x 1 x i32>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll
index 3d94b5b65420..c94b3d54535a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
   <vscale x 1 x i32>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll
index a121d8a4a603..c28ed788dbcf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
   <vscale x 1 x float>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64(
     <vscale x 1 x float> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64(
     <vscale x 2 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64(
     <vscale x 4 x float> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64(
     <vscale x 8 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll
index e9ce1f49eb60..1169526048ae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
   <vscale x 1 x float>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x half> @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64(
     <vscale x 1 x float> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64(
     <vscale x 2 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64(
     <vscale x 4 x float> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x float> @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64(
     <vscale x 8 x float> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll
index c8b5bf23a441..ff99af7d13dd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16(
     <vscale x 4 x i8> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16(
     <vscale x 8 x i8> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16(
     <vscale x 16 x i8> %0,
@@ -216,7 +216,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
@@ -236,7 +236,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16(
     <vscale x 32 x i8> %0,
@@ -257,7 +257,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32(
     <vscale x 1 x i16> %0,
@@ -298,7 +298,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -339,7 +339,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -400,7 +400,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32(
     <vscale x 8 x i16> %0,
@@ -421,7 +421,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -441,7 +441,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32(
     <vscale x 16 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -482,7 +482,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64(
     <vscale x 1 x i32> %0,
@@ -503,7 +503,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -523,7 +523,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64(
     <vscale x 2 x i32> %0,
@@ -544,7 +544,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64(
     <vscale x 4 x i32> %0,
@@ -585,7 +585,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -605,7 +605,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll
index 6d3f75388483..817d96442120 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16(
     <vscale x 4 x i8> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16(
     <vscale x 8 x i8> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16(
     <vscale x 16 x i8> %0,
@@ -216,7 +216,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
@@ -236,7 +236,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16(
     <vscale x 32 x i8> %0,
@@ -257,7 +257,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32(
     <vscale x 1 x i16> %0,
@@ -298,7 +298,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -339,7 +339,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -400,7 +400,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32(
     <vscale x 8 x i16> %0,
@@ -421,7 +421,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -441,7 +441,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32(
     <vscale x 16 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -482,7 +482,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64(
     <vscale x 1 x i32> %0,
@@ -503,7 +503,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -523,7 +523,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64(
     <vscale x 2 x i32> %0,
@@ -544,7 +544,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64(
     <vscale x 4 x i32> %0,
@@ -585,7 +585,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -605,7 +605,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll
index 313ccb495913..043538343a70 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16(
     <vscale x 4 x i8> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16(
     <vscale x 8 x i8> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16(
     <vscale x 16 x i8> %0,
@@ -216,7 +216,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
@@ -236,7 +236,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16(
     <vscale x 32 x i8> %0,
@@ -257,7 +257,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32(
     <vscale x 1 x i16> %0,
@@ -298,7 +298,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -339,7 +339,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -400,7 +400,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32(
     <vscale x 8 x i16> %0,
@@ -421,7 +421,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -441,7 +441,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32(
     <vscale x 16 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -482,7 +482,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64(
     <vscale x 1 x i32> %0,
@@ -503,7 +503,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -523,7 +523,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64(
     <vscale x 2 x i32> %0,
@@ -544,7 +544,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64(
     <vscale x 4 x i32> %0,
@@ -585,7 +585,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -605,7 +605,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll
index 0673d756f819..4bbb96dbcb01 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16(
     <vscale x 4 x i8> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16(
     <vscale x 8 x i8> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16(
     <vscale x 16 x i8> %0,
@@ -216,7 +216,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
@@ -236,7 +236,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16(
     <vscale x 32 x i8> %0,
@@ -257,7 +257,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32(
     <vscale x 1 x i16> %0,
@@ -298,7 +298,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -339,7 +339,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -400,7 +400,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32(
     <vscale x 8 x i16> %0,
@@ -421,7 +421,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -441,7 +441,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32(
     <vscale x 16 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -482,7 +482,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64(
     <vscale x 1 x i32> %0,
@@ -503,7 +503,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -523,7 +523,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64(
     <vscale x 2 x i32> %0,
@@ -544,7 +544,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64(
     <vscale x 4 x i32> %0,
@@ -585,7 +585,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -605,7 +605,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll
index eec28993151e..77d868b01147 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x ha
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16(<vscale x 2 x ha
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16(<vscale x 4 x ha
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16(
     <vscale x 4 x i8> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16(<vscale x 8 x ha
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16(
     <vscale x 8 x i8> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16(
     <vscale x 16 x i8> %0,
@@ -216,7 +216,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
@@ -236,7 +236,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16(
     <vscale x 32 x i8> %0,
@@ -257,7 +257,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32(
     <vscale x 1 x i16> %0,
@@ -298,7 +298,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -339,7 +339,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -400,7 +400,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32(
     <vscale x 8 x i16> %0,
@@ -421,7 +421,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -441,7 +441,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32(
     <vscale x 16 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -482,7 +482,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64(
     <vscale x 1 x i32> %0,
@@ -503,7 +503,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -523,7 +523,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64(
     <vscale x 2 x i32> %0,
@@ -544,7 +544,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64(
     <vscale x 4 x i32> %0,
@@ -585,7 +585,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -605,7 +605,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll
index d9ffdea610ff..aae775531bf6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x ha
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16(<vscale x 2 x ha
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16(<vscale x 4 x ha
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16(
     <vscale x 4 x i8> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16(<vscale x 8 x ha
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16(
     <vscale x 8 x i8> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16(
     <vscale x 16 x i8> %0,
@@ -216,7 +216,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
@@ -236,7 +236,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16(
     <vscale x 32 x i8> %0,
@@ -257,7 +257,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32(
     <vscale x 1 x i16> %0,
@@ -298,7 +298,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -339,7 +339,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -400,7 +400,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32(
     <vscale x 8 x i16> %0,
@@ -421,7 +421,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -441,7 +441,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32(
     <vscale x 16 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -482,7 +482,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64(
     <vscale x 1 x i32> %0,
@@ -503,7 +503,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -523,7 +523,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64(
     <vscale x 2 x i32> %0,
@@ -544,7 +544,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64(
     <vscale x 4 x i32> %0,
@@ -585,7 +585,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -605,7 +605,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll
index 2d7140f02bd4..dcb2c1ee3bdd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x h
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x h
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x h
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16(
     <vscale x 4 x i8> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x h
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16(
     <vscale x 8 x i8> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16(
     <vscale x 16 x i8> %0,
@@ -216,7 +216,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
@@ -236,7 +236,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16(
     <vscale x 32 x i8> %0,
@@ -257,7 +257,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32(
     <vscale x 1 x i16> %0,
@@ -298,7 +298,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -339,7 +339,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -400,7 +400,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32(
     <vscale x 8 x i16> %0,
@@ -421,7 +421,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -441,7 +441,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32(
     <vscale x 16 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -482,7 +482,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64(
     <vscale x 1 x i32> %0,
@@ -503,7 +503,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -523,7 +523,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64(
     <vscale x 2 x i32> %0,
@@ -544,7 +544,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64(
     <vscale x 4 x i32> %0,
@@ -585,7 +585,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -605,7 +605,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll
index 7b0d3923562f..f4459383bd05 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x h
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x h
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x h
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16(
     <vscale x 4 x i8> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x h
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16(
     <vscale x 8 x i8> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16(
     <vscale x 16 x i8> %0,
@@ -216,7 +216,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
@@ -236,7 +236,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16(
     <vscale x 32 x i8> %0,
@@ -257,7 +257,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 1 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32(
     <vscale x 1 x i16> %0,
@@ -298,7 +298,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 2 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -339,7 +339,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 4 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -400,7 +400,7 @@ define <vscale x 8 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32(
     <vscale x 8 x i16> %0,
@@ -421,7 +421,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -441,7 +441,7 @@ define <vscale x 16 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32(
     <vscale x 16 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -482,7 +482,7 @@ define <vscale x 1 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64(
     <vscale x 1 x i32> %0,
@@ -503,7 +503,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -523,7 +523,7 @@ define <vscale x 2 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64(
     <vscale x 2 x i32> %0,
@@ -544,7 +544,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 4 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64(
     <vscale x 4 x i32> %0,
@@ -585,7 +585,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -605,7 +605,7 @@ define <vscale x 8 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
index c572d45981eb..563dbbe0194c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@ define <vscale x 1 x half> @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@ define <vscale x 2 x half> @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@ define <vscale x 4 x half> @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@ define <vscale x 8 x half> @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x half> @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@ define <vscale x 1 x float> @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@ define <vscale x 2 x float> @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@ define <vscale x 4 x float> @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x float> @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1029,7 +1029,7 @@ define <vscale x 1 x double> @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1057,7 +1057,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 2 x double> @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 4 x double> @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
index 693c736cd347..51b9dd1ccf40 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@ define <vscale x 1 x half> @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@ define <vscale x 2 x half> @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@ define <vscale x 4 x half> @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@ define <vscale x 8 x half> @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x half> @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@ define <vscale x 1 x float> @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@ define <vscale x 2 x float> @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@ define <vscale x 4 x float> @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x float> @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64(<vscale
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1021,7 +1021,7 @@ define <vscale x 1 x double> @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64(<vsc
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1045,7 +1045,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64(<vscale
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 2 x double> @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64(<vsc
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64(<vscale
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1117,7 +1117,7 @@ define <vscale x 4 x double> @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64(<vsc
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
index c91996f91d02..1cb14fea586f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@ define <vscale x 1 x half> @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@ define <vscale x 2 x half> @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@ define <vscale x 4 x half> @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@ define <vscale x 8 x half> @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x half> @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@ define <vscale x 1 x float> @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@ define <vscale x 2 x float> @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@ define <vscale x 4 x float> @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x float> @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1029,7 +1029,7 @@ define <vscale x 1 x double> @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1057,7 +1057,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 2 x double> @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 4 x double> @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
index afd7e7cca951..95872af9c919 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@ define <vscale x 1 x half> @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@ define <vscale x 2 x half> @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@ define <vscale x 4 x half> @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@ define <vscale x 8 x half> @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x half> @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@ define <vscale x 1 x float> @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@ define <vscale x 2 x float> @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@ define <vscale x 4 x float> @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x float> @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64(<vscale
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1021,7 +1021,7 @@ define <vscale x 1 x double> @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64(<vsc
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1045,7 +1045,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64(<vscale
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 2 x double> @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64(<vsc
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64(<vscale
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1117,7 +1117,7 @@ define <vscale x 4 x double> @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64(<vsc
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
index 1f125132bf60..718cd37fb3d1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@ define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@ define <vscale x 2 x half> @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@ define <vscale x 4 x half> @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@ define <vscale x 8 x half> @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x half> @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@ define <vscale x 1 x float> @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@ define <vscale x 2 x float> @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@ define <vscale x 4 x float> @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x float> @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1029,7 +1029,7 @@ define <vscale x 1 x double> @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1057,7 +1057,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 2 x double> @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 4 x double> @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
index 84b42e6e10d1..33dada4c6428 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@ define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@ define <vscale x 2 x half> @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@ define <vscale x 4 x half> @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@ define <vscale x 8 x half> @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x half> @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@ define <vscale x 1 x float> @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@ define <vscale x 2 x float> @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@ define <vscale x 4 x float> @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x float> @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64(<vscale
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1021,7 +1021,7 @@ define <vscale x 1 x double> @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64(<vsc
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1045,7 +1045,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64(<vscale
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 2 x double> @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64(<vsc
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64(<vscale
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1117,7 +1117,7 @@ define <vscale x 4 x double> @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64(<vsc
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
index ae477b705e3c..edc840c3a417 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@ define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@ define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@ define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@ define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@ define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@ define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@ define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1029,7 +1029,7 @@ define <vscale x 1 x double> @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1057,7 +1057,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
index d554b4bfd456..db2a16521cde 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 1 x half>  @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@ define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@ define <vscale x 2 x half>  @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@ define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x half>  @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@ define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@ define <vscale x 8 x half>  @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@ define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@ define <vscale x 16 x half>  @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x float>  @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@ define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@ define <vscale x 2 x float>  @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@ define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@ define <vscale x 4 x float>  @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@ define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@ define <vscale x 8 x float>  @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 1 x double>  @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64(<vscale
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1021,7 +1021,7 @@ define <vscale x 1 x double> @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64(<vsc
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1045,7 +1045,7 @@ define <vscale x 2 x double>  @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64(<vscale
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64(<vsc
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x double>  @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64(<vscale
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1117,7 +1117,7 @@ define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64(<vsc
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
index 36b095ed5c83..77ce843de924 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -12,7 +12,7 @@ define <vscale x 1 x half> @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half> @intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16(<vscale x 2
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half> @intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half> @intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16(<vscale x 4
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half> @intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half> @intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16(<vscale x 8
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half> @intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half> @intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half> @intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x half> @intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x half> @intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x float> @intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32(<vscale x 1
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x float> @intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x float> @intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32(<vscale x 2
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x float> @intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32(<vscale x 4
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x float> @intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x float> @intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32(<vscale x 8
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x float> @intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x float> @intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x float> @intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -522,7 +522,7 @@ define <vscale x 1 x double> @intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -549,7 +549,7 @@ define <vscale x 1 x double> @intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -576,7 +576,7 @@ define <vscale x 2 x double> @intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -603,7 +603,7 @@ define <vscale x 2 x double> @intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -630,7 +630,7 @@ define <vscale x 4 x double> @intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -657,7 +657,7 @@ define <vscale x 4 x double> @intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -684,7 +684,7 @@ define <vscale x 8 x double> @intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -711,7 +711,7 @@ define <vscale x 8 x double> @intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
index 1cf817644e32..52d9bd83a341 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -12,7 +12,7 @@ define <vscale x 1 x half> @intrinsic_vfrdiv_vf_nxv1f16_f16(<vscale x 1 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_f16(<vscale x 1 x h
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half> @intrinsic_vfrdiv_vf_nxv2f16_f16(<vscale x 2 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half> @intrinsic_vfrdiv_mask_vf_nxv2f16_f16(<vscale x 2 x h
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half> @intrinsic_vfrdiv_vf_nxv4f16_f16(<vscale x 4 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half> @intrinsic_vfrdiv_mask_vf_nxv4f16_f16(<vscale x 4 x h
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half> @intrinsic_vfrdiv_vf_nxv8f16_f16(<vscale x 8 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half> @intrinsic_vfrdiv_mask_vf_nxv8f16_f16(<vscale x 8 x h
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half> @intrinsic_vfrdiv_vf_nxv16f16_f16(<vscale x 16 x hal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half> @intrinsic_vfrdiv_mask_vf_nxv16f16_f16(<vscale x 16
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x half> @intrinsic_vfrdiv_vf_nxv32f16_f16(<vscale x 32 x hal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x half> @intrinsic_vfrdiv_mask_vf_nxv32f16_f16(<vscale x 32
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x float> @intrinsic_vfrdiv_vf_nxv1f32_f32(<vscale x 1 x float
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x float> @intrinsic_vfrdiv_mask_vf_nxv1f32_f32(<vscale x 1 x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x float> @intrinsic_vfrdiv_vf_nxv2f32_f32(<vscale x 2 x float
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfrdiv_mask_vf_nxv2f32_f32(<vscale x 2 x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x float> @intrinsic_vfrdiv_vf_nxv4f32_f32(<vscale x 4 x float
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x float> @intrinsic_vfrdiv_mask_vf_nxv4f32_f32(<vscale x 4 x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x float> @intrinsic_vfrdiv_vf_nxv8f32_f32(<vscale x 8 x float
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x float> @intrinsic_vfrdiv_mask_vf_nxv8f32_f32(<vscale x 8 x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x float> @intrinsic_vfrdiv_vf_nxv16f32_f32(<vscale x 16 x fl
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x float> @intrinsic_vfrdiv_mask_vf_nxv16f32_f32(<vscale x 16
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x double> @intrinsic_vfrdiv_vf_nxv1f64_f64(<vscale x 1 x doub
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x double> @intrinsic_vfrdiv_mask_vf_nxv1f64_f64(<vscale x 1 x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x double> @intrinsic_vfrdiv_vf_nxv2f64_f64(<vscale x 2 x doub
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x double> @intrinsic_vfrdiv_mask_vf_nxv2f64_f64(<vscale x 2 x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x double> @intrinsic_vfrdiv_vf_nxv4f64_f64(<vscale x 4 x doub
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x double> @intrinsic_vfrdiv_mask_vf_nxv4f64_f64(<vscale x 4 x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x double> @intrinsic_vfrdiv_vf_nxv8f64_f64(<vscale x 8 x doub
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x double> @intrinsic_vfrdiv_mask_vf_nxv8f64_f64(<vscale x 8 x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll
index 244903fadb32..cb61ddad838d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -10,7 +10,7 @@ define <vscale x 1 x half> @intrinsic_vfrec7_v_nxv1f16_nxv1f16(<vscale x 1 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
     <vscale x 1 x half> %1,
@@ -50,7 +50,7 @@ define <vscale x 2 x half> @intrinsic_vfrec7_v_nxv2f16_nxv2f16(<vscale x 2 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x half> @intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16(
     <vscale x 2 x half> %1,
@@ -90,7 +90,7 @@ define <vscale x 4 x half> @intrinsic_vfrec7_v_nxv4f16_nxv4f16(<vscale x 4 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x half> @intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16(
     <vscale x 4 x half> %1,
@@ -130,7 +130,7 @@ define <vscale x 8 x half> @intrinsic_vfrec7_v_nxv8f16_nxv8f16(<vscale x 8 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half> @intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16(
     <vscale x 8 x half> %1,
@@ -170,7 +170,7 @@ define <vscale x 16 x half> @intrinsic_vfrec7_v_nxv16f16_nxv16f16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x half> @intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16(
     <vscale x 16 x half> %1,
@@ -210,7 +210,7 @@ define <vscale x 32 x half> @intrinsic_vfrec7_v_nxv32f16_nxv32f16(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x half> @intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16(
     <vscale x 32 x half> %1,
@@ -250,7 +250,7 @@ define <vscale x 1 x float> @intrinsic_vfrec7_v_nxv1f32_nxv1f32(<vscale x 1 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@ define <vscale x 1 x float> @intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32(
     <vscale x 1 x float> %1,
@@ -290,7 +290,7 @@ define <vscale x 2 x float> @intrinsic_vfrec7_v_nxv2f32_nxv2f32(<vscale x 2 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@ define <vscale x 2 x float> @intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32(
     <vscale x 2 x float> %1,
@@ -330,7 +330,7 @@ define <vscale x 4 x float> @intrinsic_vfrec7_v_nxv4f32_nxv4f32(<vscale x 4 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x float> @intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32(
     <vscale x 4 x float> %1,
@@ -370,7 +370,7 @@ define <vscale x 8 x float> @intrinsic_vfrec7_v_nxv8f32_nxv8f32(<vscale x 8 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@ define <vscale x 8 x float> @intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32(
     <vscale x 8 x float> %1,
@@ -410,7 +410,7 @@ define <vscale x 16 x float> @intrinsic_vfrec7_v_nxv16f32_nxv16f32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 16 x float> @intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32(
     <vscale x 16 x float> %1,
@@ -450,7 +450,7 @@ define <vscale x 1 x double> @intrinsic_vfrec7_v_nxv1f64_nxv1f64(<vscale x 1 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@ define <vscale x 1 x double> @intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64(
     <vscale x 1 x double> %1,
@@ -490,7 +490,7 @@ define <vscale x 2 x double> @intrinsic_vfrec7_v_nxv2f64_nxv2f64(<vscale x 2 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@ define <vscale x 2 x double> @intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64(
     <vscale x 2 x double> %1,
@@ -530,7 +530,7 @@ define <vscale x 4 x double> @intrinsic_vfrec7_v_nxv4f64_nxv4f64(<vscale x 4 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x double> @intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64(
     <vscale x 4 x double> %1,
@@ -570,7 +570,7 @@ define <vscale x 8 x double> @intrinsic_vfrec7_v_nxv8f64_nxv8f64(<vscale x 8 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@ define <vscale x 8 x double> @intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64(
     <vscale x 8 x double> %1,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll
index 7b24fb9b0238..786d3635c807 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -10,7 +10,7 @@ define <vscale x 1 x half> @intrinsic_vfrec7_v_nxv1f16_nxv1f16(<vscale x 1 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
     <vscale x 1 x half> %1,
@@ -50,7 +50,7 @@ define <vscale x 2 x half> @intrinsic_vfrec7_v_nxv2f16_nxv2f16(<vscale x 2 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x half> @intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16(
     <vscale x 2 x half> %1,
@@ -90,7 +90,7 @@ define <vscale x 4 x half> @intrinsic_vfrec7_v_nxv4f16_nxv4f16(<vscale x 4 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x half> @intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16(
     <vscale x 4 x half> %1,
@@ -130,7 +130,7 @@ define <vscale x 8 x half> @intrinsic_vfrec7_v_nxv8f16_nxv8f16(<vscale x 8 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half> @intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16(
     <vscale x 8 x half> %1,
@@ -170,7 +170,7 @@ define <vscale x 16 x half> @intrinsic_vfrec7_v_nxv16f16_nxv16f16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x half> @intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16(
     <vscale x 16 x half> %1,
@@ -210,7 +210,7 @@ define <vscale x 32 x half> @intrinsic_vfrec7_v_nxv32f16_nxv32f16(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x half> @intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16(
     <vscale x 32 x half> %1,
@@ -250,7 +250,7 @@ define <vscale x 1 x float> @intrinsic_vfrec7_v_nxv1f32_nxv1f32(<vscale x 1 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@ define <vscale x 1 x float> @intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32(
     <vscale x 1 x float> %1,
@@ -290,7 +290,7 @@ define <vscale x 2 x float> @intrinsic_vfrec7_v_nxv2f32_nxv2f32(<vscale x 2 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@ define <vscale x 2 x float> @intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32(
     <vscale x 2 x float> %1,
@@ -330,7 +330,7 @@ define <vscale x 4 x float> @intrinsic_vfrec7_v_nxv4f32_nxv4f32(<vscale x 4 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x float> @intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32(
     <vscale x 4 x float> %1,
@@ -370,7 +370,7 @@ define <vscale x 8 x float> @intrinsic_vfrec7_v_nxv8f32_nxv8f32(<vscale x 8 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@ define <vscale x 8 x float> @intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32(
     <vscale x 8 x float> %1,
@@ -410,7 +410,7 @@ define <vscale x 16 x float> @intrinsic_vfrec7_v_nxv16f32_nxv16f32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 16 x float> @intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32(
     <vscale x 16 x float> %1,
@@ -450,7 +450,7 @@ define <vscale x 1 x double> @intrinsic_vfrec7_v_nxv1f64_nxv1f64(<vscale x 1 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@ define <vscale x 1 x double> @intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64(
     <vscale x 1 x double> %1,
@@ -490,7 +490,7 @@ define <vscale x 2 x double> @intrinsic_vfrec7_v_nxv2f64_nxv2f64(<vscale x 2 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@ define <vscale x 2 x double> @intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64(
     <vscale x 2 x double> %1,
@@ -530,7 +530,7 @@ define <vscale x 4 x double> @intrinsic_vfrec7_v_nxv4f64_nxv4f64(<vscale x 4 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x double> @intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64(
     <vscale x 4 x double> %1,
@@ -570,7 +570,7 @@ define <vscale x 8 x double> @intrinsic_vfrec7_v_nxv8f64_nxv8f64(<vscale x 8 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@ define <vscale x 8 x double> @intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64(
     <vscale x 8 x double> %1,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
index 02f158664288..b9366cd8fb8c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
     <vscale x 4 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
     <vscale x 4 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
     <vscale x 4 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
     <vscale x 4 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -265,7 +265,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
     <vscale x 4 x half> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
     <vscale x 2 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -449,7 +449,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
     <vscale x 2 x float> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
     <vscale x 2 x float> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -587,7 +587,7 @@ define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1(
     <vscale x 1 x double> %0,
@@ -610,7 +610,7 @@ define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -633,7 +633,7 @@ define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1(
     <vscale x 1 x double> %0,
@@ -656,7 +656,7 @@ define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
@@ -679,7 +679,7 @@ define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1(
     <vscale x 1 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
index 952136986b9e..947db3804a31 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -265,7 +265,7 @@ define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -449,7 +449,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -587,7 +587,7 @@ define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -610,7 +610,7 @@ define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -633,7 +633,7 @@ define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -656,7 +656,7 @@ define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
@@ -679,7 +679,7 @@ define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
index 6a4ee69ce220..be34c49923c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1(
     <vscale x 4 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1(
     <vscale x 4 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1(
     <vscale x 4 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1(
     <vscale x 4 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -265,7 +265,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1(
     <vscale x 4 x half> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1(
     <vscale x 2 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -449,7 +449,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1(
     <vscale x 2 x float> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1(
     <vscale x 2 x float> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -587,7 +587,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1(
     <vscale x 1 x double> %0,
@@ -610,7 +610,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -633,7 +633,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1(
     <vscale x 1 x double> %0,
@@ -656,7 +656,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
@@ -679,7 +679,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1(
     <vscale x 1 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
index 2a049c2dee8d..c34e654ec17b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -265,7 +265,7 @@ define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -449,7 +449,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -587,7 +587,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -610,7 +610,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -633,7 +633,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -656,7 +656,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
@@ -679,7 +679,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
index ba9c55fa6eeb..64f498c429d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1(
     <vscale x 4 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1(
     <vscale x 4 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1(
     <vscale x 4 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1(
     <vscale x 4 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -265,7 +265,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1(
     <vscale x 4 x half> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1(
     <vscale x 2 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -449,7 +449,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1(
     <vscale x 2 x float> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1(
     <vscale x 2 x float> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -587,7 +587,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1(
     <vscale x 1 x double> %0,
@@ -610,7 +610,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -633,7 +633,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1(
     <vscale x 1 x double> %0,
@@ -656,7 +656,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
@@ -679,7 +679,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1(
     <vscale x 1 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
index 8b08fe516e48..406661241e78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -265,7 +265,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -449,7 +449,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -587,7 +587,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -610,7 +610,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -633,7 +633,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -656,7 +656,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
@@ -679,7 +679,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
index 3317144222e0..74811df34d3e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1(
     <vscale x 4 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1(
     <vscale x 4 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1(
     <vscale x 4 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1(
     <vscale x 4 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -265,7 +265,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1(
     <vscale x 4 x half> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1(
     <vscale x 2 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -449,7 +449,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1(
     <vscale x 2 x float> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1(
     <vscale x 2 x float> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.nxv1i1(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -587,7 +587,7 @@ define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.nxv2i1(
     <vscale x 1 x double> %0,
@@ -610,7 +610,7 @@ define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -633,7 +633,7 @@ define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.nxv4i1(
     <vscale x 1 x double> %0,
@@ -656,7 +656,7 @@ define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
@@ -679,7 +679,7 @@ define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.nxv8i1(
     <vscale x 1 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
index 9b706b59c92e..5d6397e35474 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -265,7 +265,7 @@ define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -449,7 +449,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -587,7 +587,7 @@ define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -610,7 +610,7 @@ define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -633,7 +633,7 @@ define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -656,7 +656,7 @@ define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
@@ -679,7 +679,7 @@ define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll
index 2740ecf3acff..79154fdd724b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -10,7 +10,7 @@ define <vscale x 1 x half> @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16(<vscale x 1 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x half> @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
     <vscale x 1 x half> %1,
@@ -50,7 +50,7 @@ define <vscale x 2 x half> @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16(<vscale x 2 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x half> @intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.mask.nxv2f16(
     <vscale x 2 x half> %1,
@@ -90,7 +90,7 @@ define <vscale x 4 x half> @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16(<vscale x 4 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x half> @intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.mask.nxv4f16(
     <vscale x 4 x half> %1,
@@ -130,7 +130,7 @@ define <vscale x 8 x half> @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16(<vscale x 8 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half> @intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.mask.nxv8f16(
     <vscale x 8 x half> %1,
@@ -170,7 +170,7 @@ define <vscale x 16 x half> @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x half> @intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.mask.nxv16f16(
     <vscale x 16 x half> %1,
@@ -210,7 +210,7 @@ define <vscale x 32 x half> @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x half> @intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.mask.nxv32f16(
     <vscale x 32 x half> %1,
@@ -250,7 +250,7 @@ define <vscale x 1 x float> @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@ define <vscale x 1 x float> @intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32(
     <vscale x 1 x float> %1,
@@ -290,7 +290,7 @@ define <vscale x 2 x float> @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@ define <vscale x 2 x float> @intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.mask.nxv2f32(
     <vscale x 2 x float> %1,
@@ -330,7 +330,7 @@ define <vscale x 4 x float> @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x float> @intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.mask.nxv4f32(
     <vscale x 4 x float> %1,
@@ -370,7 +370,7 @@ define <vscale x 8 x float> @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@ define <vscale x 8 x float> @intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.mask.nxv8f32(
     <vscale x 8 x float> %1,
@@ -410,7 +410,7 @@ define <vscale x 16 x float> @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 16 x float> @intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.mask.nxv16f32(
     <vscale x 16 x float> %1,
@@ -450,7 +450,7 @@ define <vscale x 1 x double> @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@ define <vscale x 1 x double> @intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.mask.nxv1f64(
     <vscale x 1 x double> %1,
@@ -490,7 +490,7 @@ define <vscale x 2 x double> @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@ define <vscale x 2 x double> @intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.mask.nxv2f64(
     <vscale x 2 x double> %1,
@@ -530,7 +530,7 @@ define <vscale x 4 x double> @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x double> @intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.mask.nxv4f64(
     <vscale x 4 x double> %1,
@@ -570,7 +570,7 @@ define <vscale x 8 x double> @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@ define <vscale x 8 x double> @intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.mask.nxv8f64(
     <vscale x 8 x double> %1,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll
index 3ea0f1d9eb6a..41053b6d762f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -10,7 +10,7 @@ define <vscale x 1 x half> @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16(<vscale x 1 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x half> @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
     <vscale x 1 x half> %1,
@@ -50,7 +50,7 @@ define <vscale x 2 x half> @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16(<vscale x 2 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x half> @intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.mask.nxv2f16(
     <vscale x 2 x half> %1,
@@ -90,7 +90,7 @@ define <vscale x 4 x half> @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16(<vscale x 4 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x half> @intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.mask.nxv4f16(
     <vscale x 4 x half> %1,
@@ -130,7 +130,7 @@ define <vscale x 8 x half> @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16(<vscale x 8 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half> @intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.mask.nxv8f16(
     <vscale x 8 x half> %1,
@@ -170,7 +170,7 @@ define <vscale x 16 x half> @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x half> @intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.mask.nxv16f16(
     <vscale x 16 x half> %1,
@@ -210,7 +210,7 @@ define <vscale x 32 x half> @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x half> @intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.mask.nxv32f16(
     <vscale x 32 x half> %1,
@@ -250,7 +250,7 @@ define <vscale x 1 x float> @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@ define <vscale x 1 x float> @intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32(
     <vscale x 1 x float> %1,
@@ -290,7 +290,7 @@ define <vscale x 2 x float> @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@ define <vscale x 2 x float> @intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.mask.nxv2f32(
     <vscale x 2 x float> %1,
@@ -330,7 +330,7 @@ define <vscale x 4 x float> @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x float> @intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.mask.nxv4f32(
     <vscale x 4 x float> %1,
@@ -370,7 +370,7 @@ define <vscale x 8 x float> @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@ define <vscale x 8 x float> @intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.mask.nxv8f32(
     <vscale x 8 x float> %1,
@@ -410,7 +410,7 @@ define <vscale x 16 x float> @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 16 x float> @intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.mask.nxv16f32(
     <vscale x 16 x float> %1,
@@ -450,7 +450,7 @@ define <vscale x 1 x double> @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@ define <vscale x 1 x double> @intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.mask.nxv1f64(
     <vscale x 1 x double> %1,
@@ -490,7 +490,7 @@ define <vscale x 2 x double> @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@ define <vscale x 2 x double> @intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.mask.nxv2f64(
     <vscale x 2 x double> %1,
@@ -530,7 +530,7 @@ define <vscale x 4 x double> @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x double> @intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.mask.nxv4f64(
     <vscale x 4 x double> %1,
@@ -570,7 +570,7 @@ define <vscale x 8 x double> @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@ define <vscale x 8 x double> @intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.mask.nxv8f64(
     <vscale x 8 x double> %1,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
index 90da0d19a2e6..62886f6924a0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -12,7 +12,7 @@ define <vscale x 1 x half> @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half> @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half> @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half> @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half> @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half> @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half> @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half> @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half> @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half> @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x half> @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x half> @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x float> @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x float> @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x float> @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x float> @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x float> @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x float> @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x float> @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x float> @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x float> @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -522,7 +522,7 @@ define <vscale x 1 x double> @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -549,7 +549,7 @@ define <vscale x 1 x double> @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -576,7 +576,7 @@ define <vscale x 2 x double> @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -603,7 +603,7 @@ define <vscale x 2 x double> @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -630,7 +630,7 @@ define <vscale x 4 x double> @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -657,7 +657,7 @@ define <vscale x 4 x double> @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -684,7 +684,7 @@ define <vscale x 8 x double> @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -711,7 +711,7 @@ define <vscale x 8 x double> @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll
index be755547ae2f..a398630d7aeb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \
 ; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -13,7 +13,7 @@ define <vscale x 1 x half> @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -36,7 +36,7 @@ define <vscale x 1 x half> @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -59,7 +59,7 @@ define <vscale x 2 x half> @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -82,7 +82,7 @@ define <vscale x 2 x half> @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -105,7 +105,7 @@ define <vscale x 4 x half> @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -128,7 +128,7 @@ define <vscale x 4 x half> @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -151,7 +151,7 @@ define <vscale x 8 x half> @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -174,7 +174,7 @@ define <vscale x 8 x half> @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -197,7 +197,7 @@ define <vscale x 16 x half> @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -220,7 +220,7 @@ define <vscale x 16 x half> @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -243,7 +243,7 @@ define <vscale x 32 x half> @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -266,7 +266,7 @@ define <vscale x 32 x half> @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -289,7 +289,7 @@ define <vscale x 1 x float> @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -312,7 +312,7 @@ define <vscale x 1 x float> @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -335,7 +335,7 @@ define <vscale x 2 x float> @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -358,7 +358,7 @@ define <vscale x 2 x float> @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -381,7 +381,7 @@ define <vscale x 4 x float> @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -404,7 +404,7 @@ define <vscale x 4 x float> @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -427,7 +427,7 @@ define <vscale x 8 x float> @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -450,7 +450,7 @@ define <vscale x 8 x float> @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -473,7 +473,7 @@ define <vscale x 16 x float> @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x float> @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64(<vscale x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -542,7 +542,7 @@ define <vscale x 1 x double> @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64(<vsca
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -565,7 +565,7 @@ define <vscale x 2 x double> @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64(<vscale x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -588,7 +588,7 @@ define <vscale x 2 x double> @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64(<vsca
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -611,7 +611,7 @@ define <vscale x 4 x double> @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64(<vscale x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -634,7 +634,7 @@ define <vscale x 4 x double> @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64(<vsca
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -657,7 +657,7 @@ define <vscale x 8 x double> @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64(<vscale x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -680,7 +680,7 @@ define <vscale x 8 x double> @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64(<vsca
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
index 80c04b0bc8c0..394f653d1525 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f3
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64(<
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16(<vscale x 1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16(<vscale x 2
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16(<vscale x 4
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16(<vscale x 8
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32(<vscale x 1
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32(<vscale x 2
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32(<vscale x 4
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32(<vscale x 8
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32(<vscal
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64(<vsca
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
index 733e7a8f61c9..b6c4d7f3d6c1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16(<vscale x 1 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16(<vscale x 2 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16(<vscale x 4 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16(<vscale x 8 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16(<vscale
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32(<vscale x 1 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32(<vscale x 2 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32(<vscale x 4 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32(<vscale x 8 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32(<vscale
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64(<vscale x
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnj_vf_nxv1f16_f16(<vscale x 1 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vf_nxv1f16_f16(<vscale x 1 x h
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnj_vf_nxv2f16_f16(<vscale x 2 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vf_nxv2f16_f16(<vscale x 2 x h
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnj_vf_nxv4f16_f16(<vscale x 4 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vf_nxv4f16_f16(<vscale x 4 x h
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnj_vf_nxv8f16_f16(<vscale x 8 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vf_nxv8f16_f16(<vscale x 8 x h
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnj_vf_nxv16f16_f16(<vscale x 16 x hal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vf_nxv16f16_f16(<vscale x 16
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnj_vf_nxv32f16_f16(<vscale x 32 x hal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vf_nxv32f16_f16(<vscale x 32
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnj_vf_nxv1f32_f32(<vscale x 1 x float
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vf_nxv1f32_f32(<vscale x 1 x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnj_vf_nxv2f32_f32(<vscale x 2 x float
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vf_nxv2f32_f32(<vscale x 2 x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnj_vf_nxv4f32_f32(<vscale x 4 x float
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vf_nxv4f32_f32(<vscale x 4 x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnj_vf_nxv8f32_f32(<vscale x 8 x float
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vf_nxv8f32_f32(<vscale x 8 x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnj_vf_nxv16f32_f32(<vscale x 16 x fl
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vf_nxv16f32_f32(<vscale x 16
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnj_vf_nxv1f64_f64(<vscale x 1 x doub
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnj_mask_vf_nxv1f64_f64(<vscale x 1 x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnj_vf_nxv2f64_f64(<vscale x 2 x doub
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1250,7 +1250,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vf_nxv2f64_f64(<vscale x 2 x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1273,7 +1273,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnj_vf_nxv4f64_f64(<vscale x 4 x doub
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1296,7 +1296,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vf_nxv4f64_f64(<vscale x 4 x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1319,7 +1319,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnj_vf_nxv8f64_f64(<vscale x 8 x doub
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1342,7 +1342,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vf_nxv8f64_f64(<vscale x 8 x
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
index 087933bb80e9..90b74bde2a16 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f1
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64(
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16(<vscale x 1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16(<vscale x 2
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16(<vscale x 4
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16(<vscale x 8
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32(<v
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
index 392d16bca022..1283a2023f5a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16(<vscale x 1 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16(<vscale x 2 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16(<vscale x 4 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16(<vscale x 8 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16(<vscale
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32(<vscal
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64(<vscale
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjn_vf_nxv1f16_f16(<vscale x 1 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vf_nxv1f16_f16(<vscale x 1 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjn_vf_nxv2f16_f16(<vscale x 2 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vf_nxv2f16_f16(<vscale x 2 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjn_vf_nxv4f16_f16(<vscale x 4 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vf_nxv4f16_f16(<vscale x 4 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjn_vf_nxv8f16_f16(<vscale x 8 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vf_nxv8f16_f16(<vscale x 8 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjn_vf_nxv16f16_f16(<vscale x 16 x ha
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vf_nxv16f16_f16(<vscale x 16
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjn_vf_nxv32f16_f16(<vscale x 32 x ha
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vf_nxv32f16_f16(<vscale x 32
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjn_vf_nxv1f32_f32(<vscale x 1 x floa
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vf_nxv1f32_f32(<vscale x 1 x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjn_vf_nxv2f32_f32(<vscale x 2 x floa
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vf_nxv2f32_f32(<vscale x 2 x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjn_vf_nxv4f32_f32(<vscale x 4 x floa
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vf_nxv4f32_f32(<vscale x 4 x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjn_vf_nxv8f32_f32(<vscale x 8 x floa
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vf_nxv8f32_f32(<vscale x 8 x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjn_vf_nxv16f32_f32(<vscale x 16 x f
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vf_nxv16f32_f32(<vscale x 1
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjn_vf_nxv1f64_f64(<vscale x 1 x dou
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjn_mask_vf_nxv1f64_f64(<vscale x 1
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjn_vf_nxv2f64_f64(<vscale x 2 x dou
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1250,7 +1250,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjn_mask_vf_nxv2f64_f64(<vscale x 2
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1273,7 +1273,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjn_vf_nxv4f64_f64(<vscale x 4 x dou
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1296,7 +1296,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjn_mask_vf_nxv4f64_f64(<vscale x 4
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1319,7 +1319,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjn_vf_nxv8f64_f64(<vscale x 8 x dou
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1342,7 +1342,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vf_nxv8f64_f64(<vscale x 8
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
index ee481d2a717e..3d31a50b1d78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f1
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64(
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16(<vscale x 1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16(<vscale x 2
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16(<vscale x 4
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16(<vscale x 8
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32(<v
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
index 17c262c89693..4484d5abe4cd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16(<vscale x 1 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16(<vscale x 2 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16(<vscale x 4 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16(<vscale x 8 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16(<vscale
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32(<vscal
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64(<vscale
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjx_vf_nxv1f16_f16(<vscale x 1 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@ define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vf_nxv1f16_f16(<vscale x 1 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjx_vf_nxv2f16_f16(<vscale x 2 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@ define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vf_nxv2f16_f16(<vscale x 2 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjx_vf_nxv4f16_f16(<vscale x 4 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@ define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vf_nxv4f16_f16(<vscale x 4 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjx_vf_nxv8f16_f16(<vscale x 8 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@ define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vf_nxv8f16_f16(<vscale x 8 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjx_vf_nxv16f16_f16(<vscale x 16 x ha
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@ define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vf_nxv16f16_f16(<vscale x 16
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjx_vf_nxv32f16_f16(<vscale x 32 x ha
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@ define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vf_nxv32f16_f16(<vscale x 32
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjx_vf_nxv1f32_f32(<vscale x 1 x floa
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vf_nxv1f32_f32(<vscale x 1 x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjx_vf_nxv2f32_f32(<vscale x 2 x floa
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@ define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vf_nxv2f32_f32(<vscale x 2 x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjx_vf_nxv4f32_f32(<vscale x 4 x floa
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vf_nxv4f32_f32(<vscale x 4 x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjx_vf_nxv8f32_f32(<vscale x 8 x floa
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@ define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vf_nxv8f32_f32(<vscale x 8 x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjx_vf_nxv16f32_f32(<vscale x 16 x f
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vf_nxv16f32_f32(<vscale x 1
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjx_vf_nxv1f64_f64(<vscale x 1 x dou
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x double> @intrinsic_vfsgnjx_mask_vf_nxv1f64_f64(<vscale x 1
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjx_vf_nxv2f64_f64(<vscale x 2 x dou
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1250,7 +1250,7 @@ define <vscale x 2 x double> @intrinsic_vfsgnjx_mask_vf_nxv2f64_f64(<vscale x 2
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1273,7 +1273,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjx_vf_nxv4f64_f64(<vscale x 4 x dou
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1296,7 +1296,7 @@ define <vscale x 4 x double> @intrinsic_vfsgnjx_mask_vf_nxv4f64_f64(<vscale x 4
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1319,7 +1319,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjx_vf_nxv8f64_f64(<vscale x 8 x dou
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1342,7 +1342,7 @@ define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vf_nxv8f64_f64(<vscale x 8
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
index ec5f182b63c0..dbc23c27a6a4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -12,7 +12,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16(<
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32(
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32(
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32(
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32(
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32(<v
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -522,7 +522,7 @@ define <vscale x 1 x double> @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -549,7 +549,7 @@ define <vscale x 1 x double> @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -576,7 +576,7 @@ define <vscale x 2 x double> @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -603,7 +603,7 @@ define <vscale x 2 x double> @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -630,7 +630,7 @@ define <vscale x 4 x double> @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -657,7 +657,7 @@ define <vscale x 4 x double> @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -684,7 +684,7 @@ define <vscale x 8 x double> @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -711,7 +711,7 @@ define <vscale x 8 x double> @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
index 882418ae4246..a8b8daecbdd3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -12,7 +12,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16(<
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f1
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32(
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32(
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32(
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32(
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32(<v
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x double> @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64(<vsc
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x double> @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x double> @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64(<vsc
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x double> @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x double> @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64(<vsc
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x double> @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x double> @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64(<vsc
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x double> @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
index be07b017559a..68a0d02b4841 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -13,7 +13,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -36,7 +36,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -60,7 +60,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -83,7 +83,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -107,7 +107,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -130,7 +130,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -177,7 +177,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -201,7 +201,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -224,7 +224,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16(
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -248,7 +248,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -271,7 +271,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16(
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -295,7 +295,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32(<v
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -365,7 +365,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32(<v
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -389,7 +389,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -412,7 +412,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32(<v
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -436,7 +436,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -459,7 +459,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32(<v
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -483,7 +483,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -506,7 +506,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -534,7 +534,7 @@ define <vscale x 1 x double> @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64(<vscal
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -561,7 +561,7 @@ define <vscale x 1 x double> @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64(<
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -589,7 +589,7 @@ define <vscale x 2 x double> @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64(<vscal
 ; CHECK-NEXT:    vfslide1up.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -616,7 +616,7 @@ define <vscale x 2 x double> @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64(<
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -644,7 +644,7 @@ define <vscale x 4 x double> @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64(<vscal
 ; CHECK-NEXT:    vfslide1up.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -671,7 +671,7 @@ define <vscale x 4 x double> @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64(<
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -699,7 +699,7 @@ define <vscale x 8 x double> @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64(<vscal
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -726,7 +726,7 @@ define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
index 0fb22151843d..7c1ac3028fd2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -13,7 +13,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -36,7 +36,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -60,7 +60,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -83,7 +83,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -107,7 +107,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -130,7 +130,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -177,7 +177,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -201,7 +201,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -224,7 +224,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16(
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -248,7 +248,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -271,7 +271,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16(
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -295,7 +295,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32(<v
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -365,7 +365,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32(<v
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -389,7 +389,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -412,7 +412,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32(<v
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -436,7 +436,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -459,7 +459,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32(<v
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -483,7 +483,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -506,7 +506,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -530,7 +530,7 @@ define <vscale x 1 x double> @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -553,7 +553,7 @@ define <vscale x 1 x double> @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64(<
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -577,7 +577,7 @@ define <vscale x 2 x double> @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -600,7 +600,7 @@ define <vscale x 2 x double> @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64(<
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -624,7 +624,7 @@ define <vscale x 4 x double> @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -647,7 +647,7 @@ define <vscale x 4 x double> @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64(<
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -671,7 +671,7 @@ define <vscale x 8 x double> @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -694,7 +694,7 @@ define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
index c27471248bb5..4c405352fac9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -10,7 +10,7 @@ define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(<vscale x 1 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
     <vscale x 1 x half> %1,
@@ -50,7 +50,7 @@ define <vscale x 2 x half> @intrinsic_vfsqrt_v_nxv2f16_nxv2f16(<vscale x 2 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x half> @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
     <vscale x 2 x half> %1,
@@ -90,7 +90,7 @@ define <vscale x 4 x half> @intrinsic_vfsqrt_v_nxv4f16_nxv4f16(<vscale x 4 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x half> @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16(
     <vscale x 4 x half> %1,
@@ -130,7 +130,7 @@ define <vscale x 8 x half> @intrinsic_vfsqrt_v_nxv8f16_nxv8f16(<vscale x 8 x hal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x half> @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16(
     <vscale x 8 x half> %1,
@@ -170,7 +170,7 @@ define <vscale x 16 x half> @intrinsic_vfsqrt_v_nxv16f16_nxv16f16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x half> @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16(
     <vscale x 16 x half> %1,
@@ -210,7 +210,7 @@ define <vscale x 32 x half> @intrinsic_vfsqrt_v_nxv32f16_nxv32f16(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
     <vscale x 32 x half> %0,
@@ -228,7 +228,7 @@ define <vscale x 1 x float> @intrinsic_vfsqrt_v_nxv1f32_nxv1f32(<vscale x 1 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
     <vscale x 1 x float> %0,
@@ -248,7 +248,7 @@ define <vscale x 1 x float> @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32(
     <vscale x 1 x float> %1,
@@ -268,7 +268,7 @@ define <vscale x 2 x float> @intrinsic_vfsqrt_v_nxv2f32_nxv2f32(<vscale x 2 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
     <vscale x 2 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x float> @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
     <vscale x 2 x float> %1,
@@ -308,7 +308,7 @@ define <vscale x 4 x float> @intrinsic_vfsqrt_v_nxv4f32_nxv4f32(<vscale x 4 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
     <vscale x 4 x float> %0,
@@ -328,7 +328,7 @@ define <vscale x 4 x float> @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32(
     <vscale x 4 x float> %1,
@@ -348,7 +348,7 @@ define <vscale x 8 x float> @intrinsic_vfsqrt_v_nxv8f32_nxv8f32(<vscale x 8 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
     <vscale x 8 x float> %0,
@@ -368,7 +368,7 @@ define <vscale x 8 x float> @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32(
     <vscale x 8 x float> %1,
@@ -388,7 +388,7 @@ define <vscale x 16 x float> @intrinsic_vfsqrt_v_nxv16f32_nxv16f32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
     <vscale x 16 x float> %0,
@@ -406,7 +406,7 @@ define <vscale x 1 x double> @intrinsic_vfsqrt_v_nxv1f64_nxv1f64(<vscale x 1 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
     <vscale x 1 x double> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double> @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64(
     <vscale x 1 x double> %1,
@@ -446,7 +446,7 @@ define <vscale x 2 x double> @intrinsic_vfsqrt_v_nxv2f64_nxv2f64(<vscale x 2 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
     <vscale x 2 x double> %0,
@@ -466,7 +466,7 @@ define <vscale x 2 x double> @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64(
     <vscale x 2 x double> %1,
@@ -486,7 +486,7 @@ define <vscale x 4 x double> @intrinsic_vfsqrt_v_nxv4f64_nxv4f64(<vscale x 4 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
     <vscale x 4 x double> %0,
@@ -506,7 +506,7 @@ define <vscale x 4 x double> @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
     <vscale x 4 x double> %1,
@@ -526,7 +526,7 @@ define <vscale x 8 x double> @intrinsic_vfsqrt_v_nxv8f64_nxv8f64(<vscale x 8 x d
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
     <vscale x 8 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll
index 454a1e533253..04d411e49cf7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -10,7 +10,7 @@ define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -32,7 +32,7 @@ define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x half> %0,
   <vscale x 1 x half> %1,
   <vscale x 1 x i1> %2,
@@ -56,7 +56,7 @@ define <vscale x 2 x half> @intrinsic_vfsqrt_v_nxv2f16_nxv2f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -78,7 +78,7 @@ define <vscale x 2 x half> @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x half> %0,
   <vscale x 2 x half> %1,
   <vscale x 2 x i1> %2,
@@ -102,7 +102,7 @@ define <vscale x 4 x half> @intrinsic_vfsqrt_v_nxv4f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -124,7 +124,7 @@ define <vscale x 4 x half> @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x half> %0,
   <vscale x 4 x half> %1,
   <vscale x 4 x i1> %2,
@@ -148,7 +148,7 @@ define <vscale x 8 x half> @intrinsic_vfsqrt_v_nxv8f16_nxv8f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -170,7 +170,7 @@ define <vscale x 8 x half> @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x half> %0,
   <vscale x 8 x half> %1,
   <vscale x 8 x i1> %2,
@@ -194,7 +194,7 @@ define <vscale x 16 x half> @intrinsic_vfsqrt_v_nxv16f16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -216,7 +216,7 @@ define <vscale x 16 x half> @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x half> %0,
   <vscale x 16 x half> %1,
   <vscale x 16 x i1> %2,
@@ -240,7 +240,7 @@ define <vscale x 32 x half> @intrinsic_vfsqrt_v_nxv32f16_nxv32f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 32 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -262,7 +262,7 @@ define <vscale x 32 x half> @intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 32 x half> %0,
   <vscale x 32 x half> %1,
   <vscale x 32 x i1> %2,
@@ -286,7 +286,7 @@ define <vscale x 1 x float> @intrinsic_vfsqrt_v_nxv1f32_nxv1f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -308,7 +308,7 @@ define <vscale x 1 x float> @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x float> %0,
   <vscale x 1 x float> %1,
   <vscale x 1 x i1> %2,
@@ -332,7 +332,7 @@ define <vscale x 2 x float> @intrinsic_vfsqrt_v_nxv2f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -354,7 +354,7 @@ define <vscale x 2 x float> @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x float> %0,
   <vscale x 2 x float> %1,
   <vscale x 2 x i1> %2,
@@ -378,7 +378,7 @@ define <vscale x 4 x float> @intrinsic_vfsqrt_v_nxv4f32_nxv4f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -400,7 +400,7 @@ define <vscale x 4 x float> @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x float> %0,
   <vscale x 4 x float> %1,
   <vscale x 4 x i1> %2,
@@ -424,7 +424,7 @@ define <vscale x 8 x float> @intrinsic_vfsqrt_v_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -446,7 +446,7 @@ define <vscale x 8 x float> @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x float> %0,
   <vscale x 8 x float> %1,
   <vscale x 8 x i1> %2,
@@ -470,7 +470,7 @@ define <vscale x 16 x float> @intrinsic_vfsqrt_v_nxv16f32_nxv16f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -492,7 +492,7 @@ define <vscale x 16 x float> @intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x float> %0,
   <vscale x 16 x float> %1,
   <vscale x 16 x i1> %2,
@@ -516,7 +516,7 @@ define <vscale x 1 x double> @intrinsic_vfsqrt_v_nxv1f64_nxv1f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x double> %0,
   i64 %1) nounwind {
 entry:
@@ -538,7 +538,7 @@ define <vscale x 1 x double> @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x double> %0,
   <vscale x 1 x double> %1,
   <vscale x 1 x i1> %2,
@@ -562,7 +562,7 @@ define <vscale x 2 x double> @intrinsic_vfsqrt_v_nxv2f64_nxv2f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x double> %0,
   i64 %1) nounwind {
 entry:
@@ -584,7 +584,7 @@ define <vscale x 2 x double> @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x double> %0,
   <vscale x 2 x double> %1,
   <vscale x 2 x i1> %2,
@@ -608,7 +608,7 @@ define <vscale x 4 x double> @intrinsic_vfsqrt_v_nxv4f64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x double> %0,
   i64 %1) nounwind {
 entry:
@@ -630,7 +630,7 @@ define <vscale x 4 x double> @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x double> %0,
   <vscale x 4 x double> %1,
   <vscale x 4 x i1> %2,
@@ -654,7 +654,7 @@ define <vscale x 8 x double> @intrinsic_vfsqrt_v_nxv8f64_nxv8f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x double> %0,
   i64 %1) nounwind {
 entry:
@@ -676,7 +676,7 @@ define <vscale x 8 x double> @intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x double> %0,
   <vscale x 8 x double> %1,
   <vscale x 8 x i1> %2,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
index 7aa38c082d5d..dc139f861ebf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x half> @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x half> @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x half> @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x half> @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x half> @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x half> @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@ define <vscale x 32 x half> @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@ define <vscale x 1 x float> @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x float> @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x float> @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@ define <vscale x 2 x float> @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@ define <vscale x 4 x float> @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@ define <vscale x 4 x float> @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x float> @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x float> @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@ define <vscale x 16 x float> @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 16 x float> @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@ define <vscale x 1 x double> @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x double> @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x double> @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@ define <vscale x 2 x double> @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 4 x double> @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@ define <vscale x 4 x double> @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@ define <vscale x 8 x double> @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@ define <vscale x 8 x double> @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64(<v
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@ define <vscale x 1 x half> @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@ define <vscale x 1 x half> @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@ define <vscale x 2 x half> @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@ define <vscale x 2 x half> @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@ define <vscale x 4 x half> @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@ define <vscale x 4 x half> @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@ define <vscale x 8 x half> @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@ define <vscale x 8 x half> @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@ define <vscale x 16 x half> @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@ define <vscale x 16 x half> @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@ define <vscale x 32 x half> @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@ define <vscale x 32 x half> @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@ define <vscale x 1 x float> @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x float> @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x float> @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@ define <vscale x 2 x float> @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 4 x float> @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x float> @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@ define <vscale x 8 x float> @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@ define <vscale x 8 x float> @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x float> @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 16 x float> @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 1 x double> @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@ define <vscale x 1 x double> @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x double> @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@ define <vscale x 2 x double> @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@ define <vscale x 4 x double> @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@ define <vscale x 4 x double> @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@ define <vscale x 8 x double> @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@ define <vscale x 8 x double> @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
index ee6608267735..1d115495d071 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \
 ; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x half> @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -56,7 +56,7 @@ define <vscale x 2 x half> @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -78,7 +78,7 @@ define <vscale x 2 x half> @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -100,7 +100,7 @@ define <vscale x 4 x half> @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -122,7 +122,7 @@ define <vscale x 4 x half> @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -144,7 +144,7 @@ define <vscale x 8 x half> @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -166,7 +166,7 @@ define <vscale x 8 x half> @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ define <vscale x 16 x half> @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -210,7 +210,7 @@ define <vscale x 16 x half> @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -232,7 +232,7 @@ define <vscale x 32 x half> @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -255,7 +255,7 @@ define <vscale x 32 x half> @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -277,7 +277,7 @@ define <vscale x 1 x float> @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -299,7 +299,7 @@ define <vscale x 1 x float> @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -321,7 +321,7 @@ define <vscale x 2 x float> @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -343,7 +343,7 @@ define <vscale x 2 x float> @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -365,7 +365,7 @@ define <vscale x 4 x float> @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -387,7 +387,7 @@ define <vscale x 4 x float> @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -409,7 +409,7 @@ define <vscale x 8 x float> @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -431,7 +431,7 @@ define <vscale x 8 x float> @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@ define <vscale x 16 x float> @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -476,7 +476,7 @@ define <vscale x 16 x float> @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -498,7 +498,7 @@ define <vscale x 1 x double> @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -520,7 +520,7 @@ define <vscale x 1 x double> @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -542,7 +542,7 @@ define <vscale x 2 x double> @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x double> @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -586,7 +586,7 @@ define <vscale x 4 x double> @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -608,7 +608,7 @@ define <vscale x 4 x double> @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -630,7 +630,7 @@ define <vscale x 8 x double> @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -653,7 +653,7 @@ define <vscale x 8 x double> @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64(<v
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -676,7 +676,7 @@ define <vscale x 1 x half> @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -699,7 +699,7 @@ define <vscale x 1 x half> @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -722,7 +722,7 @@ define <vscale x 2 x half> @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -745,7 +745,7 @@ define <vscale x 2 x half> @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -768,7 +768,7 @@ define <vscale x 4 x half> @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -791,7 +791,7 @@ define <vscale x 4 x half> @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -814,7 +814,7 @@ define <vscale x 8 x half> @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -837,7 +837,7 @@ define <vscale x 8 x half> @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -860,7 +860,7 @@ define <vscale x 16 x half> @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -883,7 +883,7 @@ define <vscale x 16 x half> @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -906,7 +906,7 @@ define <vscale x 32 x half> @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -929,7 +929,7 @@ define <vscale x 32 x half> @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -952,7 +952,7 @@ define <vscale x 1 x float> @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -975,7 +975,7 @@ define <vscale x 1 x float> @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -998,7 +998,7 @@ define <vscale x 2 x float> @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1021,7 +1021,7 @@ define <vscale x 2 x float> @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1044,7 +1044,7 @@ define <vscale x 4 x float> @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1067,7 +1067,7 @@ define <vscale x 4 x float> @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1090,7 +1090,7 @@ define <vscale x 8 x float> @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x float> @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1136,7 +1136,7 @@ define <vscale x 16 x float> @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32(<vscale x
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x float> @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x double> @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64(<vscale x 1
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1205,7 +1205,7 @@ define <vscale x 1 x double> @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64(<vscal
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1228,7 +1228,7 @@ define <vscale x 2 x double> @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64(<vscale x 2
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1251,7 +1251,7 @@ define <vscale x 2 x double> @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64(<vscal
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1274,7 +1274,7 @@ define <vscale x 4 x double> @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64(<vscale x 4
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1297,7 +1297,7 @@ define <vscale x 4 x double> @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64(<vscal
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@ define <vscale x 8 x double> @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64(<vscale x 8
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1343,7 +1343,7 @@ define <vscale x 8 x double> @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64(<vscal
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
index 1fda439491a8..242116336155 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
@@ -237,7 +237,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -259,7 +259,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x double> %0,
@@ -282,7 +282,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -304,7 +304,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x double> %0,
@@ -327,7 +327,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x double> %0,
@@ -372,7 +372,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -394,7 +394,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x double> %0,
@@ -418,7 +418,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -441,7 +441,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -488,7 +488,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -512,7 +512,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -535,7 +535,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -559,7 +559,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -582,7 +582,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -606,7 +606,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -629,7 +629,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
@@ -653,7 +653,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -676,7 +676,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32(
     <vscale x 1 x double> %0,
@@ -700,7 +700,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -723,7 +723,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32(
     <vscale x 2 x double> %0,
@@ -747,7 +747,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -770,7 +770,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32(
     <vscale x 4 x double> %0,
@@ -794,7 +794,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -817,7 +817,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
index ecebe7aecbd4..9ce400d43663 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
@@ -237,7 +237,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -259,7 +259,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x double> %0,
@@ -282,7 +282,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -304,7 +304,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x double> %0,
@@ -327,7 +327,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x double> %0,
@@ -372,7 +372,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -394,7 +394,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x double> %0,
@@ -418,7 +418,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -441,7 +441,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -488,7 +488,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -512,7 +512,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -535,7 +535,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -559,7 +559,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -582,7 +582,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -606,7 +606,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -629,7 +629,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
@@ -653,7 +653,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -676,7 +676,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32(
     <vscale x 1 x double> %0,
@@ -700,7 +700,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -723,7 +723,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32(
     <vscale x 2 x double> %0,
@@ -747,7 +747,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -770,7 +770,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32(
     <vscale x 4 x double> %0,
@@ -794,7 +794,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -817,7 +817,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
index c6283baa94c5..fc0322f47e09 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16(<
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -215,7 +215,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -238,7 +238,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -260,7 +260,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -283,7 +283,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -305,7 +305,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -328,7 +328,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -373,7 +373,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -396,7 +396,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -419,7 +419,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -442,7 +442,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -488,7 +488,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -511,7 +511,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -534,7 +534,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -557,7 +557,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -580,7 +580,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -603,7 +603,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -626,7 +626,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16(<
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -649,7 +649,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -672,7 +672,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -695,7 +695,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -718,7 +718,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -741,7 +741,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -764,7 +764,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -810,7 +810,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
index 425dc3797efa..3272e4ed8942 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16(<
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -215,7 +215,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -238,7 +238,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -260,7 +260,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -283,7 +283,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -305,7 +305,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -328,7 +328,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -373,7 +373,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -396,7 +396,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -419,7 +419,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -442,7 +442,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -488,7 +488,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -511,7 +511,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -534,7 +534,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -557,7 +557,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -580,7 +580,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -603,7 +603,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -626,7 +626,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16(<
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -649,7 +649,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -672,7 +672,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -695,7 +695,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -718,7 +718,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -741,7 +741,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -764,7 +764,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -810,7 +810,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll
index 45b8316fc1e6..6246970f9fdf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll
index 3c5a037ce73b..fbe5d421bc0d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll
index ee27b4b52317..19ccffd9ab33 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
   <vscale x 1 x i8>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x half> @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x half> @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x half> @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@ define <vscale x 32 x half> @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8(<vscale x 3
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -236,7 +236,7 @@ define <vscale x 32 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -257,7 +257,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -277,7 +277,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -318,7 +318,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -339,7 +339,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -359,7 +359,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -400,7 +400,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -421,7 +421,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -441,7 +441,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -462,7 +462,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -482,7 +482,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -503,7 +503,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -523,7 +523,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -544,7 +544,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -605,7 +605,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll
index dc4a30f8f23d..1f4b049c8e11 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
   <vscale x 1 x i8>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x half> @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x half> @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x half> @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@ define <vscale x 32 x half> @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8(<vscale x 3
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -236,7 +236,7 @@ define <vscale x 32 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -257,7 +257,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -277,7 +277,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -318,7 +318,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -339,7 +339,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -359,7 +359,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -400,7 +400,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -421,7 +421,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -441,7 +441,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -462,7 +462,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -482,7 +482,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -503,7 +503,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -523,7 +523,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -544,7 +544,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -605,7 +605,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll
index c03fe3205363..6a5176564259 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
   <vscale x 1 x i8>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x half> @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x half> @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x half> @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@ define <vscale x 32 x half> @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -236,7 +236,7 @@ define <vscale x 32 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -257,7 +257,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -277,7 +277,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -318,7 +318,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -339,7 +339,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -359,7 +359,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -400,7 +400,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -421,7 +421,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -441,7 +441,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -462,7 +462,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -482,7 +482,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -503,7 +503,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -523,7 +523,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -544,7 +544,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -605,7 +605,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll
index 13d62b66a4a4..3a093496c514 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
   <vscale x 1 x i8>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x half> @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x half> @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x half> @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x half> @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@ define <vscale x 32 x half> @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -236,7 +236,7 @@ define <vscale x 32 x half> @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -257,7 +257,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -277,7 +277,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -318,7 +318,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -339,7 +339,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -359,7 +359,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -400,7 +400,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -421,7 +421,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -441,7 +441,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -462,7 +462,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -482,7 +482,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -503,7 +503,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -523,7 +523,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -544,7 +544,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -585,7 +585,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -605,7 +605,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll
index decb30b41e5d..980f8d2558b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16(
     <vscale x 2 x i32> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16(
     <vscale x 8 x i32> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16(
     <vscale x 16 x i32> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32(
     <vscale x 1 x i64> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32(
     <vscale x 2 x i64> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32(
     <vscale x 4 x i64> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll
index 07341b08ea53..315bcc0fdc5d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16(
     <vscale x 2 x i32> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16(
     <vscale x 8 x i32> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16(
     <vscale x 16 x i32> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32(
     <vscale x 1 x i64> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32(
     <vscale x 2 x i64> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32(
     <vscale x 4 x i64> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll
index 618b8e978592..0b70bc3f9aaa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16(
     <vscale x 2 x i32> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16(
     <vscale x 8 x i32> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16(
     <vscale x 16 x i32> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32(
     <vscale x 1 x i64> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32(
     <vscale x 2 x i64> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32(
     <vscale x 4 x i64> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll
index 0538918d3827..fff49170e451 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16(
     <vscale x 2 x i32> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16(
     <vscale x 8 x i32> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16(
     <vscale x 16 x i32> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32(
     <vscale x 1 x i64> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32(
     <vscale x 2 x i64> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32(
     <vscale x 4 x i64> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll
index 95ef4469f8bf..87c8f8e1172d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16(
     <vscale x 2 x i32> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16(
     <vscale x 8 x i32> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16(
     <vscale x 16 x i32> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32(
     <vscale x 1 x i64> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32(
     <vscale x 2 x i64> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32(
     <vscale x 4 x i64> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll
index d21467111837..de7eb29c100c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16(
     <vscale x 2 x i32> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16(
     <vscale x 8 x i32> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16(
     <vscale x 16 x i32> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32(
     <vscale x 1 x i64> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32(
     <vscale x 2 x i64> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32(
     <vscale x 4 x i64> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll
index 4439265988a3..7eb7877779db 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16(
     <vscale x 2 x i32> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16(
     <vscale x 8 x i32> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16(
     <vscale x 16 x i32> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32(
     <vscale x 1 x i64> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32(
     <vscale x 2 x i64> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32(
     <vscale x 4 x i64> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll
index 4b7bd24e6416..6920b2787ca0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -52,7 +52,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16(
     <vscale x 2 x i32> %0,
@@ -93,7 +93,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16(
     <vscale x 8 x i32> %0,
@@ -175,7 +175,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16(
     <vscale x 16 x i32> %0,
@@ -216,7 +216,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32(
     <vscale x 1 x i64> %0,
@@ -257,7 +257,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32(
     <vscale x 2 x i64> %0,
@@ -298,7 +298,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32(
     <vscale x 4 x i64> %0,
@@ -339,7 +339,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
index a64343178a4e..fe3814468ce0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float>  @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x float>  @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x float>  @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x float>  @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x float>  @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x float>  @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x float>  @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x float>  @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x double>  @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x double>  @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x double>  @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x double>  @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x double>  @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x double>  @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -427,7 +427,7 @@ define <vscale x 1 x float>  @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -451,7 +451,7 @@ define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 2 x float>  @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -499,7 +499,7 @@ define <vscale x 2 x float> @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -523,7 +523,7 @@ define <vscale x 4 x float>  @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -547,7 +547,7 @@ define <vscale x 4 x float> @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -571,7 +571,7 @@ define <vscale x 8 x float>  @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -595,7 +595,7 @@ define <vscale x 8 x float> @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -619,7 +619,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -643,7 +643,7 @@ define <vscale x 16 x float> @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16(<v
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -667,7 +667,7 @@ define <vscale x 1 x double>  @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -691,7 +691,7 @@ define <vscale x 1 x double> @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -715,7 +715,7 @@ define <vscale x 2 x double>  @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -739,7 +739,7 @@ define <vscale x 2 x double> @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -763,7 +763,7 @@ define <vscale x 4 x double>  @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@ define <vscale x 4 x double> @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -811,7 +811,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -835,7 +835,7 @@ define <vscale x 8 x double> @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
index 367ebdcf882a..aea0274727c2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float>  @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x float>  @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x float>  @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x float>  @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x float>  @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x float>  @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x float>  @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x float>  @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x double>  @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x double>  @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x double>  @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x double>  @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x double>  @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x double>  @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -427,7 +427,7 @@ define <vscale x 1 x float>  @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -451,7 +451,7 @@ define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 2 x float>  @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -499,7 +499,7 @@ define <vscale x 2 x float> @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -523,7 +523,7 @@ define <vscale x 4 x float>  @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -547,7 +547,7 @@ define <vscale x 4 x float> @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -571,7 +571,7 @@ define <vscale x 8 x float>  @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -595,7 +595,7 @@ define <vscale x 8 x float> @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -619,7 +619,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -643,7 +643,7 @@ define <vscale x 16 x float> @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16(<v
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -667,7 +667,7 @@ define <vscale x 1 x double>  @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -691,7 +691,7 @@ define <vscale x 1 x double> @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -715,7 +715,7 @@ define <vscale x 2 x double>  @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -739,7 +739,7 @@ define <vscale x 2 x double> @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -763,7 +763,7 @@ define <vscale x 4 x double>  @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@ define <vscale x 4 x double> @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -811,7 +811,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -835,7 +835,7 @@ define <vscale x 8 x double> @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
index 7c04a9c39383..28922268a3d9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float>  @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x float>  @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x float>  @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x float>  @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x float>  @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x float>  @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x float>  @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x float>  @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x double>  @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x double>  @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x double>  @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x double>  @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x double>  @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x double>  @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -427,7 +427,7 @@ define <vscale x 1 x float>  @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -451,7 +451,7 @@ define <vscale x 1 x float> @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 2 x float>  @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -499,7 +499,7 @@ define <vscale x 2 x float> @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -523,7 +523,7 @@ define <vscale x 4 x float>  @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -547,7 +547,7 @@ define <vscale x 4 x float> @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -571,7 +571,7 @@ define <vscale x 8 x float>  @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -595,7 +595,7 @@ define <vscale x 8 x float> @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -619,7 +619,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -643,7 +643,7 @@ define <vscale x 16 x float> @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16(<v
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -667,7 +667,7 @@ define <vscale x 1 x double>  @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -691,7 +691,7 @@ define <vscale x 1 x double> @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -715,7 +715,7 @@ define <vscale x 2 x double>  @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -739,7 +739,7 @@ define <vscale x 2 x double> @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -763,7 +763,7 @@ define <vscale x 4 x double>  @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@ define <vscale x 4 x double> @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -811,7 +811,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -835,7 +835,7 @@ define <vscale x 8 x double> @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll
index acc254135142..92c404d71599 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float>  @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x float>  @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x float>  @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x float>  @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x float>  @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x float>  @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x float>  @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x float>  @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x double>  @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x double>  @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x double>  @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x double>  @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x double>  @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x double>  @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -427,7 +427,7 @@ define <vscale x 1 x float>  @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -451,7 +451,7 @@ define <vscale x 1 x float> @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 2 x float>  @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -499,7 +499,7 @@ define <vscale x 2 x float> @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -523,7 +523,7 @@ define <vscale x 4 x float>  @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -547,7 +547,7 @@ define <vscale x 4 x float> @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -571,7 +571,7 @@ define <vscale x 8 x float>  @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -595,7 +595,7 @@ define <vscale x 8 x float> @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -619,7 +619,7 @@ define <vscale x 16 x float>  @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -643,7 +643,7 @@ define <vscale x 16 x float> @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16(<v
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -667,7 +667,7 @@ define <vscale x 1 x double>  @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -691,7 +691,7 @@ define <vscale x 1 x double> @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -715,7 +715,7 @@ define <vscale x 2 x double>  @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -739,7 +739,7 @@ define <vscale x 2 x double> @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -763,7 +763,7 @@ define <vscale x 4 x double>  @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@ define <vscale x 4 x double> @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -811,7 +811,7 @@ define <vscale x 8 x double>  @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -835,7 +835,7 @@ define <vscale x 8 x double> @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32(<vsc
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
index cf42d05e0e53..5c54927cf74a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x float> @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x float> @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x float> @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x float> @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x float> @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x float> @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x float> @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
@@ -237,7 +237,7 @@ define <vscale x 1 x double> @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -259,7 +259,7 @@ define <vscale x 1 x double> @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x double> %0,
@@ -282,7 +282,7 @@ define <vscale x 2 x double> @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -304,7 +304,7 @@ define <vscale x 2 x double> @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x double> %0,
@@ -327,7 +327,7 @@ define <vscale x 4 x double> @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 4 x double> @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x double> %0,
@@ -372,7 +372,7 @@ define <vscale x 8 x double> @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -394,7 +394,7 @@ define <vscale x 8 x double> @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x double> %0,
@@ -418,7 +418,7 @@ define <vscale x 1 x float> @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -441,7 +441,7 @@ define <vscale x 1 x float> @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@ define <vscale x 2 x float> @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -488,7 +488,7 @@ define <vscale x 2 x float> @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -512,7 +512,7 @@ define <vscale x 4 x float> @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -535,7 +535,7 @@ define <vscale x 4 x float> @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -559,7 +559,7 @@ define <vscale x 8 x float> @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -582,7 +582,7 @@ define <vscale x 8 x float> @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -606,7 +606,7 @@ define <vscale x 16 x float> @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -629,7 +629,7 @@ define <vscale x 16 x float> @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
@@ -653,7 +653,7 @@ define <vscale x 1 x double> @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -676,7 +676,7 @@ define <vscale x 1 x double> @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32(
     <vscale x 1 x double> %0,
@@ -700,7 +700,7 @@ define <vscale x 2 x double> @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -723,7 +723,7 @@ define <vscale x 2 x double> @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32(
     <vscale x 2 x double> %0,
@@ -747,7 +747,7 @@ define <vscale x 4 x double> @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -770,7 +770,7 @@ define <vscale x 4 x double> @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32(
     <vscale x 4 x double> %0,
@@ -794,7 +794,7 @@ define <vscale x 8 x double> @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -817,7 +817,7 @@ define <vscale x 8 x double> @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
index 6392531ca470..f69e29e1e346 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x float> @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x float> @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x float> @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x float> @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x float> @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x float> @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x float> @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
@@ -237,7 +237,7 @@ define <vscale x 1 x double> @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -259,7 +259,7 @@ define <vscale x 1 x double> @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x double> %0,
@@ -282,7 +282,7 @@ define <vscale x 2 x double> @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -304,7 +304,7 @@ define <vscale x 2 x double> @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x double> %0,
@@ -327,7 +327,7 @@ define <vscale x 4 x double> @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 4 x double> @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x double> %0,
@@ -372,7 +372,7 @@ define <vscale x 8 x double> @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -394,7 +394,7 @@ define <vscale x 8 x double> @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x double> %0,
@@ -418,7 +418,7 @@ define <vscale x 1 x float> @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -441,7 +441,7 @@ define <vscale x 1 x float> @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@ define <vscale x 2 x float> @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -488,7 +488,7 @@ define <vscale x 2 x float> @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -512,7 +512,7 @@ define <vscale x 4 x float> @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -535,7 +535,7 @@ define <vscale x 4 x float> @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -559,7 +559,7 @@ define <vscale x 8 x float> @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -582,7 +582,7 @@ define <vscale x 8 x float> @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -606,7 +606,7 @@ define <vscale x 16 x float> @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -629,7 +629,7 @@ define <vscale x 16 x float> @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
@@ -653,7 +653,7 @@ define <vscale x 1 x double> @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -676,7 +676,7 @@ define <vscale x 1 x double> @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32(
     <vscale x 1 x double> %0,
@@ -700,7 +700,7 @@ define <vscale x 2 x double> @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -723,7 +723,7 @@ define <vscale x 2 x double> @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32(
     <vscale x 2 x double> %0,
@@ -747,7 +747,7 @@ define <vscale x 4 x double> @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -770,7 +770,7 @@ define <vscale x 4 x double> @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32(
     <vscale x 4 x double> %0,
@@ -794,7 +794,7 @@ define <vscale x 8 x double> @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -817,7 +817,7 @@ define <vscale x 8 x double> @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
index 0ed393ac4791..cc6a3d2bc3f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float>  @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x float>  @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x float>  @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x float>  @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x float>  @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x float>  @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x float>  @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x float>  @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x double>  @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x double>  @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x double>  @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x double>  @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x double>  @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x double>  @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -427,7 +427,7 @@ define <vscale x 1 x float>  @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -451,7 +451,7 @@ define <vscale x 1 x float> @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 2 x float>  @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -499,7 +499,7 @@ define <vscale x 2 x float> @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -523,7 +523,7 @@ define <vscale x 4 x float>  @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -547,7 +547,7 @@ define <vscale x 4 x float> @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -571,7 +571,7 @@ define <vscale x 8 x float>  @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -595,7 +595,7 @@ define <vscale x 8 x float> @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -619,7 +619,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -643,7 +643,7 @@ define <vscale x 16 x float> @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16(<
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -667,7 +667,7 @@ define <vscale x 1 x double>  @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -691,7 +691,7 @@ define <vscale x 1 x double> @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -715,7 +715,7 @@ define <vscale x 2 x double>  @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -739,7 +739,7 @@ define <vscale x 2 x double> @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -763,7 +763,7 @@ define <vscale x 4 x double>  @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@ define <vscale x 4 x double> @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -811,7 +811,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -835,7 +835,7 @@ define <vscale x 8 x double> @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll
index 50533c49aa07..76e4f1e47e91 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float>  @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x float>  @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x float>  @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x float>  @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x float>  @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x float>  @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x float>  @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x float>  @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x double>  @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x double>  @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x double>  @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x double>  @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x double>  @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x double>  @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -427,7 +427,7 @@ define <vscale x 1 x float>  @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -451,7 +451,7 @@ define <vscale x 1 x float> @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 2 x float>  @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -499,7 +499,7 @@ define <vscale x 2 x float> @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -523,7 +523,7 @@ define <vscale x 4 x float>  @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -547,7 +547,7 @@ define <vscale x 4 x float> @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -571,7 +571,7 @@ define <vscale x 8 x float>  @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -595,7 +595,7 @@ define <vscale x 8 x float> @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -619,7 +619,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -643,7 +643,7 @@ define <vscale x 16 x float> @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16(<
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -667,7 +667,7 @@ define <vscale x 1 x double>  @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -691,7 +691,7 @@ define <vscale x 1 x double> @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -715,7 +715,7 @@ define <vscale x 2 x double>  @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -739,7 +739,7 @@ define <vscale x 2 x double> @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -763,7 +763,7 @@ define <vscale x 4 x double>  @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@ define <vscale x 4 x double> @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -811,7 +811,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -835,7 +835,7 @@ define <vscale x 8 x double> @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
index fc170789a0cf..46de3bf2baec 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float>  @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x float>  @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x float>  @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x float>  @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x float>  @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x float>  @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x float>  @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x float>  @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x double>  @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x double>  @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x double>  @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x double>  @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x double>  @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x double>  @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -427,7 +427,7 @@ define <vscale x 1 x float>  @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -451,7 +451,7 @@ define <vscale x 1 x float> @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 2 x float>  @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -499,7 +499,7 @@ define <vscale x 2 x float> @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -523,7 +523,7 @@ define <vscale x 4 x float>  @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -547,7 +547,7 @@ define <vscale x 4 x float> @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -571,7 +571,7 @@ define <vscale x 8 x float>  @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -595,7 +595,7 @@ define <vscale x 8 x float> @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -619,7 +619,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -643,7 +643,7 @@ define <vscale x 16 x float> @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16(<
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -667,7 +667,7 @@ define <vscale x 1 x double>  @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -691,7 +691,7 @@ define <vscale x 1 x double> @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -715,7 +715,7 @@ define <vscale x 2 x double>  @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -739,7 +739,7 @@ define <vscale x 2 x double> @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -763,7 +763,7 @@ define <vscale x 4 x double>  @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@ define <vscale x 4 x double> @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -811,7 +811,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -835,7 +835,7 @@ define <vscale x 8 x double> @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll
index 5455d7ead02b..38716d32bcc2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float>  @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x float>  @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x float>  @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x float>  @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x float>  @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x float>  @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x float>  @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x float>  @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -242,7 +242,7 @@ define <vscale x 1 x double>  @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -265,7 +265,7 @@ define <vscale x 1 x double>  @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -288,7 +288,7 @@ define <vscale x 2 x double>  @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -311,7 +311,7 @@ define <vscale x 2 x double>  @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x double>  @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x double>  @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -380,7 +380,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -403,7 +403,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -427,7 +427,7 @@ define <vscale x 1 x float>  @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -451,7 +451,7 @@ define <vscale x 1 x float> @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -475,7 +475,7 @@ define <vscale x 2 x float>  @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -499,7 +499,7 @@ define <vscale x 2 x float> @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -523,7 +523,7 @@ define <vscale x 4 x float>  @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -547,7 +547,7 @@ define <vscale x 4 x float> @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -571,7 +571,7 @@ define <vscale x 8 x float>  @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16(<vscale
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -595,7 +595,7 @@ define <vscale x 8 x float> @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -619,7 +619,7 @@ define <vscale x 16 x float>  @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16(<vsca
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -643,7 +643,7 @@ define <vscale x 16 x float> @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16(<
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -667,7 +667,7 @@ define <vscale x 1 x double>  @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -691,7 +691,7 @@ define <vscale x 1 x double> @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -715,7 +715,7 @@ define <vscale x 2 x double>  @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -739,7 +739,7 @@ define <vscale x 2 x double> @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -763,7 +763,7 @@ define <vscale x 4 x double>  @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@ define <vscale x 4 x double> @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -811,7 +811,7 @@ define <vscale x 8 x double>  @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -835,7 +835,7 @@ define <vscale x 8 x double> @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
index ffff9c78c56e..2aa947184297 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
   <vscale x 2 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
     <vscale x 2 x float> %0,
@@ -35,7 +35,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv4f16(
     <vscale x 2 x float> %0,
@@ -127,7 +127,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -150,7 +150,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv8f16(
     <vscale x 2 x float> %0,
@@ -173,7 +173,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -196,7 +196,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv16f16(
     <vscale x 2 x float> %0,
@@ -219,7 +219,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -242,7 +242,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -334,7 +334,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32(
     <vscale x 1 x double> %0,
@@ -357,7 +357,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -380,7 +380,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32(
     <vscale x 1 x double> %0,
@@ -403,7 +403,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.nxv1f64(
     <vscale x 1 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
index 7941dc7b6ece..0f65959da89b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
   <vscale x 2 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
     <vscale x 2 x float> %0,
@@ -35,7 +35,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv4f16(
     <vscale x 2 x float> %0,
@@ -127,7 +127,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -150,7 +150,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv8f16(
     <vscale x 2 x float> %0,
@@ -173,7 +173,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -196,7 +196,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv16f16(
     <vscale x 2 x float> %0,
@@ -219,7 +219,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -242,7 +242,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -334,7 +334,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32(
     <vscale x 1 x double> %0,
@@ -357,7 +357,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -380,7 +380,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32(
     <vscale x 1 x double> %0,
@@ -403,7 +403,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
index 203d0e5cd6b3..744d6e1a88a6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16(
   <vscale x 2 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16(
     <vscale x 2 x float> %0,
@@ -35,7 +35,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv4f16(
     <vscale x 2 x float> %0,
@@ -127,7 +127,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -150,7 +150,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv8f16(
     <vscale x 2 x float> %0,
@@ -173,7 +173,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -196,7 +196,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16(
     <vscale x 2 x float> %0,
@@ -219,7 +219,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -242,7 +242,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -334,7 +334,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32(
     <vscale x 1 x double> %0,
@@ -357,7 +357,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -380,7 +380,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32(
     <vscale x 1 x double> %0,
@@ -403,7 +403,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.nxv1f64(
     <vscale x 1 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
index 4a6b5a76daa0..bdab478153b5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16(
   <vscale x 2 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16(
     <vscale x 2 x float> %0,
@@ -35,7 +35,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv4f16(
     <vscale x 2 x float> %0,
@@ -127,7 +127,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -150,7 +150,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv8f16(
     <vscale x 2 x float> %0,
@@ -173,7 +173,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -196,7 +196,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16(
     <vscale x 2 x float> %0,
@@ -219,7 +219,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -242,7 +242,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
@@ -265,7 +265,7 @@ define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -334,7 +334,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32(
     <vscale x 1 x double> %0,
@@ -357,7 +357,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -380,7 +380,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32(
     <vscale x 1 x double> %0,
@@ -403,7 +403,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -426,7 +426,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
@@ -495,7 +495,7 @@ define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
index b966461efffc..4f96c8fd1c41 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
@@ -237,7 +237,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -259,7 +259,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x double> %0,
@@ -282,7 +282,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -304,7 +304,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x double> %0,
@@ -327,7 +327,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x double> %0,
@@ -372,7 +372,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -394,7 +394,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x double> %0,
@@ -418,7 +418,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -441,7 +441,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -488,7 +488,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -512,7 +512,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -535,7 +535,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -559,7 +559,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -582,7 +582,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -606,7 +606,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -629,7 +629,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
@@ -653,7 +653,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -676,7 +676,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32(
     <vscale x 1 x double> %0,
@@ -700,7 +700,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -723,7 +723,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32(
     <vscale x 2 x double> %0,
@@ -747,7 +747,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -770,7 +770,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32(
     <vscale x 4 x double> %0,
@@ -794,7 +794,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -817,7 +817,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
index ddf5652fed74..81dfb7d5f221 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
@@ -237,7 +237,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -259,7 +259,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x double> %0,
@@ -282,7 +282,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -304,7 +304,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x double> %0,
@@ -327,7 +327,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x double> %0,
@@ -372,7 +372,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -394,7 +394,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x double> %0,
@@ -418,7 +418,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -441,7 +441,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -488,7 +488,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -512,7 +512,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -535,7 +535,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -559,7 +559,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -582,7 +582,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -606,7 +606,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -629,7 +629,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16(<vs
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
@@ -653,7 +653,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -676,7 +676,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32(
     <vscale x 1 x double> %0,
@@ -700,7 +700,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -723,7 +723,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32(
     <vscale x 2 x double> %0,
@@ -747,7 +747,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -770,7 +770,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32(
     <vscale x 4 x double> %0,
@@ -794,7 +794,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -817,7 +817,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32(<vsca
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
index 4d00c889b189..48b76bda76d8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16(<
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -215,7 +215,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -238,7 +238,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -260,7 +260,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -283,7 +283,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -305,7 +305,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -328,7 +328,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -373,7 +373,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -396,7 +396,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -419,7 +419,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -442,7 +442,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -488,7 +488,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -511,7 +511,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -534,7 +534,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -557,7 +557,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -580,7 +580,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -603,7 +603,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -626,7 +626,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16(<
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -649,7 +649,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -672,7 +672,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -695,7 +695,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -718,7 +718,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -741,7 +741,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -764,7 +764,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -810,7 +810,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
index 2718893a3c3f..b97ee74fa2ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16(<
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -215,7 +215,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -238,7 +238,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -260,7 +260,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -283,7 +283,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -305,7 +305,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -328,7 +328,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -350,7 +350,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -373,7 +373,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -396,7 +396,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -419,7 +419,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -442,7 +442,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -488,7 +488,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -511,7 +511,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -534,7 +534,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -557,7 +557,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16(<vscale x
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -580,7 +580,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16(<vsc
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -603,7 +603,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16(<vscal
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -626,7 +626,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16(<
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -649,7 +649,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -672,7 +672,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -695,7 +695,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -718,7 +718,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -741,7 +741,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -764,7 +764,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32(<vscale
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -810,7 +810,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32(<vs
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
index 99f773d30371..fc347229be82 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
   i32);
 
@@ -9,7 +9,7 @@ define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
     i32 %0)
@@ -27,7 +27,7 @@ define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -45,7 +45,7 @@ define <vscale x 2 x i8> @intrinsic_vid_v_nxv2i8(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
     i32 %0)
@@ -63,7 +63,7 @@ define <vscale x 2 x i8> @intrinsic_vid_mask_v_nxv2i8(<vscale x 2 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 4 x i8> @intrinsic_vid_v_nxv4i8(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
     i32 %0)
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vid_mask_v_nxv4i8(<vscale x 4 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -117,7 +117,7 @@ define <vscale x 8 x i8> @intrinsic_vid_v_nxv8i8(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
     i32 %0)
@@ -135,7 +135,7 @@ define <vscale x 8 x i8> @intrinsic_vid_mask_v_nxv8i8(<vscale x 8 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -153,7 +153,7 @@ define <vscale x 16 x i8> @intrinsic_vid_v_nxv16i8(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
     i32 %0)
@@ -171,7 +171,7 @@ define <vscale x 16 x i8> @intrinsic_vid_mask_v_nxv16i8(<vscale x 16 x i8> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -189,7 +189,7 @@ define <vscale x 32 x i8> @intrinsic_vid_v_nxv32i8(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
     i32 %0)
@@ -207,7 +207,7 @@ define <vscale x 32 x i8> @intrinsic_vid_mask_v_nxv32i8(<vscale x 32 x i8> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -225,7 +225,7 @@ define <vscale x 1 x i16> @intrinsic_vid_v_nxv1i16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
     i32 %0)
@@ -243,7 +243,7 @@ define <vscale x 1 x i16> @intrinsic_vid_mask_v_nxv1i16(<vscale x 1 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -261,7 +261,7 @@ define <vscale x 2 x i16> @intrinsic_vid_v_nxv2i16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
     i32 %0)
@@ -279,7 +279,7 @@ define <vscale x 2 x i16> @intrinsic_vid_mask_v_nxv2i16(<vscale x 2 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -297,7 +297,7 @@ define <vscale x 4 x i16> @intrinsic_vid_v_nxv4i16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
     i32 %0)
@@ -315,7 +315,7 @@ define <vscale x 4 x i16> @intrinsic_vid_mask_v_nxv4i16(<vscale x 4 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -333,7 +333,7 @@ define <vscale x 8 x i16> @intrinsic_vid_v_nxv8i16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
     i32 %0)
@@ -351,7 +351,7 @@ define <vscale x 8 x i16> @intrinsic_vid_mask_v_nxv8i16(<vscale x 8 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -369,7 +369,7 @@ define <vscale x 16 x i16> @intrinsic_vid_v_nxv16i16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
     i32 %0)
@@ -387,7 +387,7 @@ define <vscale x 16 x i16> @intrinsic_vid_mask_v_nxv16i16(<vscale x 16 x i16> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -405,7 +405,7 @@ define <vscale x 32 x i16> @intrinsic_vid_v_nxv32i16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
     i32 %0)
@@ -423,7 +423,7 @@ define <vscale x 32 x i16> @intrinsic_vid_mask_v_nxv32i16(<vscale x 32 x i16> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -441,7 +441,7 @@ define <vscale x 1 x i32> @intrinsic_vid_v_nxv1i32(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
     i32 %0)
@@ -459,7 +459,7 @@ define <vscale x 1 x i32> @intrinsic_vid_mask_v_nxv1i32(<vscale x 1 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -477,7 +477,7 @@ define <vscale x 2 x i32> @intrinsic_vid_v_nxv2i32(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
     i32 %0)
@@ -495,7 +495,7 @@ define <vscale x 2 x i32> @intrinsic_vid_mask_v_nxv2i32(<vscale x 2 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -513,7 +513,7 @@ define <vscale x 4 x i32> @intrinsic_vid_v_nxv4i32(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
     i32 %0)
@@ -531,7 +531,7 @@ define <vscale x 4 x i32> @intrinsic_vid_mask_v_nxv4i32(<vscale x 4 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -549,7 +549,7 @@ define <vscale x 8 x i32> @intrinsic_vid_v_nxv8i32(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
     i32 %0)
@@ -567,7 +567,7 @@ define <vscale x 8 x i32> @intrinsic_vid_mask_v_nxv8i32(<vscale x 8 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -585,7 +585,7 @@ define <vscale x 16 x i32> @intrinsic_vid_v_nxv16i32(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
     i32 %0)
@@ -603,7 +603,7 @@ define <vscale x 16 x i32> @intrinsic_vid_mask_v_nxv16i32(<vscale x 16 x i32> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -621,7 +621,7 @@ define <vscale x 1 x i64> @intrinsic_vid_v_nxv1i64(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
     i32 %0)
@@ -639,7 +639,7 @@ define <vscale x 1 x i64> @intrinsic_vid_mask_v_nxv1i64(<vscale x 1 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -657,7 +657,7 @@ define <vscale x 2 x i64> @intrinsic_vid_v_nxv2i64(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
     i32 %0)
@@ -675,7 +675,7 @@ define <vscale x 2 x i64> @intrinsic_vid_mask_v_nxv2i64(<vscale x 2 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -693,7 +693,7 @@ define <vscale x 4 x i64> @intrinsic_vid_v_nxv4i64(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
     i32 %0)
@@ -711,7 +711,7 @@ define <vscale x 4 x i64> @intrinsic_vid_mask_v_nxv4i64(<vscale x 4 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -729,7 +729,7 @@ define <vscale x 8 x i64> @intrinsic_vid_v_nxv8i64(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
     i32 %0)
@@ -747,7 +747,7 @@ define <vscale x 8 x i64> @intrinsic_vid_mask_v_nxv8i64(<vscale x 8 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
index 10f060b67250..8285713a51bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
   i64);
 
@@ -9,7 +9,7 @@ define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
     i64 %0)
@@ -27,7 +27,7 @@ define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -45,7 +45,7 @@ define <vscale x 2 x i8> @intrinsic_vid_v_nxv2i8(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
     i64 %0)
@@ -63,7 +63,7 @@ define <vscale x 2 x i8> @intrinsic_vid_mask_v_nxv2i8(<vscale x 2 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 4 x i8> @intrinsic_vid_v_nxv4i8(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
     i64 %0)
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vid_mask_v_nxv4i8(<vscale x 4 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -117,7 +117,7 @@ define <vscale x 8 x i8> @intrinsic_vid_v_nxv8i8(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
     i64 %0)
@@ -135,7 +135,7 @@ define <vscale x 8 x i8> @intrinsic_vid_mask_v_nxv8i8(<vscale x 8 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -153,7 +153,7 @@ define <vscale x 16 x i8> @intrinsic_vid_v_nxv16i8(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
     i64 %0)
@@ -171,7 +171,7 @@ define <vscale x 16 x i8> @intrinsic_vid_mask_v_nxv16i8(<vscale x 16 x i8> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -189,7 +189,7 @@ define <vscale x 32 x i8> @intrinsic_vid_v_nxv32i8(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
     i64 %0)
@@ -207,7 +207,7 @@ define <vscale x 32 x i8> @intrinsic_vid_mask_v_nxv32i8(<vscale x 32 x i8> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -225,7 +225,7 @@ define <vscale x 1 x i16> @intrinsic_vid_v_nxv1i16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
     i64 %0)
@@ -243,7 +243,7 @@ define <vscale x 1 x i16> @intrinsic_vid_mask_v_nxv1i16(<vscale x 1 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -261,7 +261,7 @@ define <vscale x 2 x i16> @intrinsic_vid_v_nxv2i16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
     i64 %0)
@@ -279,7 +279,7 @@ define <vscale x 2 x i16> @intrinsic_vid_mask_v_nxv2i16(<vscale x 2 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -297,7 +297,7 @@ define <vscale x 4 x i16> @intrinsic_vid_v_nxv4i16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
     i64 %0)
@@ -315,7 +315,7 @@ define <vscale x 4 x i16> @intrinsic_vid_mask_v_nxv4i16(<vscale x 4 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -333,7 +333,7 @@ define <vscale x 8 x i16> @intrinsic_vid_v_nxv8i16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
     i64 %0)
@@ -351,7 +351,7 @@ define <vscale x 8 x i16> @intrinsic_vid_mask_v_nxv8i16(<vscale x 8 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -369,7 +369,7 @@ define <vscale x 16 x i16> @intrinsic_vid_v_nxv16i16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
     i64 %0)
@@ -387,7 +387,7 @@ define <vscale x 16 x i16> @intrinsic_vid_mask_v_nxv16i16(<vscale x 16 x i16> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -405,7 +405,7 @@ define <vscale x 32 x i16> @intrinsic_vid_v_nxv32i16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
     i64 %0)
@@ -423,7 +423,7 @@ define <vscale x 32 x i16> @intrinsic_vid_mask_v_nxv32i16(<vscale x 32 x i16> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -441,7 +441,7 @@ define <vscale x 1 x i32> @intrinsic_vid_v_nxv1i32(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
     i64 %0)
@@ -459,7 +459,7 @@ define <vscale x 1 x i32> @intrinsic_vid_mask_v_nxv1i32(<vscale x 1 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -477,7 +477,7 @@ define <vscale x 2 x i32> @intrinsic_vid_v_nxv2i32(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
     i64 %0)
@@ -495,7 +495,7 @@ define <vscale x 2 x i32> @intrinsic_vid_mask_v_nxv2i32(<vscale x 2 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -513,7 +513,7 @@ define <vscale x 4 x i32> @intrinsic_vid_v_nxv4i32(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
     i64 %0)
@@ -531,7 +531,7 @@ define <vscale x 4 x i32> @intrinsic_vid_mask_v_nxv4i32(<vscale x 4 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -549,7 +549,7 @@ define <vscale x 8 x i32> @intrinsic_vid_v_nxv8i32(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
     i64 %0)
@@ -567,7 +567,7 @@ define <vscale x 8 x i32> @intrinsic_vid_mask_v_nxv8i32(<vscale x 8 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -585,7 +585,7 @@ define <vscale x 16 x i32> @intrinsic_vid_v_nxv16i32(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
     i64 %0)
@@ -603,7 +603,7 @@ define <vscale x 16 x i32> @intrinsic_vid_mask_v_nxv16i32(<vscale x 16 x i32> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -621,7 +621,7 @@ define <vscale x 1 x i64> @intrinsic_vid_v_nxv1i64(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
     i64 %0)
@@ -639,7 +639,7 @@ define <vscale x 1 x i64> @intrinsic_vid_mask_v_nxv1i64(<vscale x 1 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -657,7 +657,7 @@ define <vscale x 2 x i64> @intrinsic_vid_v_nxv2i64(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
     i64 %0)
@@ -675,7 +675,7 @@ define <vscale x 2 x i64> @intrinsic_vid_mask_v_nxv2i64(<vscale x 2 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -693,7 +693,7 @@ define <vscale x 4 x i64> @intrinsic_vid_v_nxv4i64(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
     i64 %0)
@@ -711,7 +711,7 @@ define <vscale x 4 x i64> @intrinsic_vid_mask_v_nxv4i64(<vscale x 4 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -729,7 +729,7 @@ define <vscale x 8 x i64> @intrinsic_vid_v_nxv8i64(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
     i64 %0)
@@ -747,7 +747,7 @@ define <vscale x 8 x i64> @intrinsic_vid_mask_v_nxv8i64(<vscale x 8 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
index 109bd3968e3a..de22172b74bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
   <vscale x 1 x i1>,
   i32);
@@ -10,7 +10,7 @@ define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
     <vscale x 1 x i1> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -50,7 +50,7 @@ define <vscale x 2 x i8> @intrinsic_viota_m_nxv2i8_nxv2i1(<vscale x 2 x i1> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
     <vscale x 2 x i1> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x i8> @intrinsic_viota_mask_m_nxv2i8_nxv2i1(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -90,7 +90,7 @@ define <vscale x 4 x i8> @intrinsic_viota_m_nxv4i8_nxv4i1(<vscale x 4 x i1> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
     <vscale x 4 x i1> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x i8> @intrinsic_viota_mask_m_nxv4i8_nxv4i1(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -130,7 +130,7 @@ define <vscale x 8 x i8> @intrinsic_viota_m_nxv8i8_nxv8i1(<vscale x 8 x i1> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
     <vscale x 8 x i1> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_viota_mask_m_nxv8i8_nxv8i1(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -170,7 +170,7 @@ define <vscale x 16 x i8> @intrinsic_viota_m_nxv16i8_nxv16i1(<vscale x 16 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
     <vscale x 16 x i1> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x i8> @intrinsic_viota_mask_m_nxv16i8_nxv16i1(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -210,7 +210,7 @@ define <vscale x 32 x i8> @intrinsic_viota_m_nxv32i8_nxv32i1(<vscale x 32 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
     <vscale x 32 x i1> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x i8> @intrinsic_viota_mask_m_nxv32i8_nxv32i1(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -250,7 +250,7 @@ define <vscale x 64 x i8> @intrinsic_viota_m_nxv64i8_nxv64i1(<vscale x 64 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
     <vscale x 64 x i1> %0,
@@ -270,7 +270,7 @@ define <vscale x 64 x i8> @intrinsic_viota_mask_m_nxv64i8_nxv64i1(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -290,7 +290,7 @@ define <vscale x 1 x i16> @intrinsic_viota_m_nxv1i16_nxv1i1(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
     <vscale x 1 x i1> %0,
@@ -310,7 +310,7 @@ define <vscale x 1 x i16> @intrinsic_viota_mask_m_nxv1i16_nxv1i1(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -330,7 +330,7 @@ define <vscale x 2 x i16> @intrinsic_viota_m_nxv2i16_nxv2i1(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
     <vscale x 2 x i1> %0,
@@ -350,7 +350,7 @@ define <vscale x 2 x i16> @intrinsic_viota_mask_m_nxv2i16_nxv2i1(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -370,7 +370,7 @@ define <vscale x 4 x i16> @intrinsic_viota_m_nxv4i16_nxv4i1(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
     <vscale x 4 x i1> %0,
@@ -390,7 +390,7 @@ define <vscale x 4 x i16> @intrinsic_viota_mask_m_nxv4i16_nxv4i1(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -410,7 +410,7 @@ define <vscale x 8 x i16> @intrinsic_viota_m_nxv8i16_nxv8i1(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
     <vscale x 8 x i1> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x i16> @intrinsic_viota_mask_m_nxv8i16_nxv8i1(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -450,7 +450,7 @@ define <vscale x 16 x i16> @intrinsic_viota_m_nxv16i16_nxv16i1(<vscale x 16 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
     <vscale x 16 x i1> %0,
@@ -470,7 +470,7 @@ define <vscale x 16 x i16> @intrinsic_viota_mask_m_nxv16i16_nxv16i1(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -490,7 +490,7 @@ define <vscale x 32 x i16> @intrinsic_viota_m_nxv32i16_nxv32i1(<vscale x 32 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
     <vscale x 32 x i1> %0,
@@ -510,7 +510,7 @@ define <vscale x 32 x i16> @intrinsic_viota_mask_m_nxv32i16_nxv32i1(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -530,7 +530,7 @@ define <vscale x 1 x i32> @intrinsic_viota_m_nxv1i32_nxv1i1(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
     <vscale x 1 x i1> %0,
@@ -550,7 +550,7 @@ define <vscale x 1 x i32> @intrinsic_viota_mask_m_nxv1i32_nxv1i1(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -570,7 +570,7 @@ define <vscale x 2 x i32> @intrinsic_viota_m_nxv2i32_nxv2i1(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
     <vscale x 2 x i1> %0,
@@ -590,7 +590,7 @@ define <vscale x 2 x i32> @intrinsic_viota_mask_m_nxv2i32_nxv2i1(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i32> @intrinsic_viota_m_nxv4i32_nxv4i1(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
     <vscale x 4 x i1> %0,
@@ -630,7 +630,7 @@ define <vscale x 4 x i32> @intrinsic_viota_mask_m_nxv4i32_nxv4i1(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -650,7 +650,7 @@ define <vscale x 8 x i32> @intrinsic_viota_m_nxv8i32_nxv8i1(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
     <vscale x 8 x i1> %0,
@@ -670,7 +670,7 @@ define <vscale x 8 x i32> @intrinsic_viota_mask_m_nxv8i32_nxv8i1(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -690,7 +690,7 @@ define <vscale x 16 x i32> @intrinsic_viota_m_nxv16i32_nxv16i1(<vscale x 16 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
     <vscale x 16 x i1> %0,
@@ -710,7 +710,7 @@ define <vscale x 16 x i32> @intrinsic_viota_mask_m_nxv16i32_nxv16i1(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -730,7 +730,7 @@ define <vscale x 1 x i64> @intrinsic_viota_m_nxv1i64_nxv1i1(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
     <vscale x 1 x i1> %0,
@@ -750,7 +750,7 @@ define <vscale x 1 x i64> @intrinsic_viota_mask_m_nxv1i64_nxv1i1(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -770,7 +770,7 @@ define <vscale x 2 x i64> @intrinsic_viota_m_nxv2i64_nxv2i1(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
     <vscale x 2 x i1> %0,
@@ -790,7 +790,7 @@ define <vscale x 2 x i64> @intrinsic_viota_mask_m_nxv2i64_nxv2i1(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -810,7 +810,7 @@ define <vscale x 4 x i64> @intrinsic_viota_m_nxv4i64_nxv4i1(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
     <vscale x 4 x i1> %0,
@@ -830,7 +830,7 @@ define <vscale x 4 x i64> @intrinsic_viota_mask_m_nxv4i64_nxv4i1(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 8 x i64> @intrinsic_viota_m_nxv8i64_nxv8i1(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
     <vscale x 8 x i1> %0,
@@ -870,7 +870,7 @@ define <vscale x 8 x i64> @intrinsic_viota_mask_m_nxv8i64_nxv8i1(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
index 0f97ce3c39ce..c199a744733d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
   <vscale x 1 x i1>,
   i64);
@@ -10,7 +10,7 @@ define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
     <vscale x 1 x i1> %0,
@@ -30,7 +30,7 @@ define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -50,7 +50,7 @@ define <vscale x 2 x i8> @intrinsic_viota_m_nxv2i8_nxv2i1(<vscale x 2 x i1> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
     <vscale x 2 x i1> %0,
@@ -70,7 +70,7 @@ define <vscale x 2 x i8> @intrinsic_viota_mask_m_nxv2i8_nxv2i1(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -90,7 +90,7 @@ define <vscale x 4 x i8> @intrinsic_viota_m_nxv4i8_nxv4i1(<vscale x 4 x i1> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
     <vscale x 4 x i1> %0,
@@ -110,7 +110,7 @@ define <vscale x 4 x i8> @intrinsic_viota_mask_m_nxv4i8_nxv4i1(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -130,7 +130,7 @@ define <vscale x 8 x i8> @intrinsic_viota_m_nxv8i8_nxv8i1(<vscale x 8 x i1> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
     <vscale x 8 x i1> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_viota_mask_m_nxv8i8_nxv8i1(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -170,7 +170,7 @@ define <vscale x 16 x i8> @intrinsic_viota_m_nxv16i8_nxv16i1(<vscale x 16 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
     <vscale x 16 x i1> %0,
@@ -190,7 +190,7 @@ define <vscale x 16 x i8> @intrinsic_viota_mask_m_nxv16i8_nxv16i1(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -210,7 +210,7 @@ define <vscale x 32 x i8> @intrinsic_viota_m_nxv32i8_nxv32i1(<vscale x 32 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
     <vscale x 32 x i1> %0,
@@ -230,7 +230,7 @@ define <vscale x 32 x i8> @intrinsic_viota_mask_m_nxv32i8_nxv32i1(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -250,7 +250,7 @@ define <vscale x 64 x i8> @intrinsic_viota_m_nxv64i8_nxv64i1(<vscale x 64 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
     <vscale x 64 x i1> %0,
@@ -270,7 +270,7 @@ define <vscale x 64 x i8> @intrinsic_viota_mask_m_nxv64i8_nxv64i1(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -290,7 +290,7 @@ define <vscale x 1 x i16> @intrinsic_viota_m_nxv1i16_nxv1i1(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
     <vscale x 1 x i1> %0,
@@ -310,7 +310,7 @@ define <vscale x 1 x i16> @intrinsic_viota_mask_m_nxv1i16_nxv1i1(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -330,7 +330,7 @@ define <vscale x 2 x i16> @intrinsic_viota_m_nxv2i16_nxv2i1(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
     <vscale x 2 x i1> %0,
@@ -350,7 +350,7 @@ define <vscale x 2 x i16> @intrinsic_viota_mask_m_nxv2i16_nxv2i1(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -370,7 +370,7 @@ define <vscale x 4 x i16> @intrinsic_viota_m_nxv4i16_nxv4i1(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
     <vscale x 4 x i1> %0,
@@ -390,7 +390,7 @@ define <vscale x 4 x i16> @intrinsic_viota_mask_m_nxv4i16_nxv4i1(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -410,7 +410,7 @@ define <vscale x 8 x i16> @intrinsic_viota_m_nxv8i16_nxv8i1(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
     <vscale x 8 x i1> %0,
@@ -430,7 +430,7 @@ define <vscale x 8 x i16> @intrinsic_viota_mask_m_nxv8i16_nxv8i1(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -450,7 +450,7 @@ define <vscale x 16 x i16> @intrinsic_viota_m_nxv16i16_nxv16i1(<vscale x 16 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
     <vscale x 16 x i1> %0,
@@ -470,7 +470,7 @@ define <vscale x 16 x i16> @intrinsic_viota_mask_m_nxv16i16_nxv16i1(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -490,7 +490,7 @@ define <vscale x 32 x i16> @intrinsic_viota_m_nxv32i16_nxv32i1(<vscale x 32 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
     <vscale x 32 x i1> %0,
@@ -510,7 +510,7 @@ define <vscale x 32 x i16> @intrinsic_viota_mask_m_nxv32i16_nxv32i1(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -530,7 +530,7 @@ define <vscale x 1 x i32> @intrinsic_viota_m_nxv1i32_nxv1i1(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
     <vscale x 1 x i1> %0,
@@ -550,7 +550,7 @@ define <vscale x 1 x i32> @intrinsic_viota_mask_m_nxv1i32_nxv1i1(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -570,7 +570,7 @@ define <vscale x 2 x i32> @intrinsic_viota_m_nxv2i32_nxv2i1(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
     <vscale x 2 x i1> %0,
@@ -590,7 +590,7 @@ define <vscale x 2 x i32> @intrinsic_viota_mask_m_nxv2i32_nxv2i1(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i32> @intrinsic_viota_m_nxv4i32_nxv4i1(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
     <vscale x 4 x i1> %0,
@@ -630,7 +630,7 @@ define <vscale x 4 x i32> @intrinsic_viota_mask_m_nxv4i32_nxv4i1(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -650,7 +650,7 @@ define <vscale x 8 x i32> @intrinsic_viota_m_nxv8i32_nxv8i1(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
     <vscale x 8 x i1> %0,
@@ -670,7 +670,7 @@ define <vscale x 8 x i32> @intrinsic_viota_mask_m_nxv8i32_nxv8i1(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -690,7 +690,7 @@ define <vscale x 16 x i32> @intrinsic_viota_m_nxv16i32_nxv16i1(<vscale x 16 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
     <vscale x 16 x i1> %0,
@@ -710,7 +710,7 @@ define <vscale x 16 x i32> @intrinsic_viota_mask_m_nxv16i32_nxv16i1(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -730,7 +730,7 @@ define <vscale x 1 x i64> @intrinsic_viota_m_nxv1i64_nxv1i1(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
     <vscale x 1 x i1> %0,
@@ -750,7 +750,7 @@ define <vscale x 1 x i64> @intrinsic_viota_mask_m_nxv1i64_nxv1i1(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -770,7 +770,7 @@ define <vscale x 2 x i64> @intrinsic_viota_m_nxv2i64_nxv2i1(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
     <vscale x 2 x i1> %0,
@@ -790,7 +790,7 @@ define <vscale x 2 x i64> @intrinsic_viota_mask_m_nxv2i64_nxv2i1(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -810,7 +810,7 @@ define <vscale x 4 x i64> @intrinsic_viota_m_nxv4i64_nxv4i1(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
     <vscale x 4 x i1> %0,
@@ -830,7 +830,7 @@ define <vscale x 4 x i64> @intrinsic_viota_mask_m_nxv4i64_nxv4i1(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 8 x i64> @intrinsic_viota_m_nxv8i64_nxv8i1(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
     <vscale x 8 x i1> %0,
@@ -870,7 +870,7 @@ define <vscale x 8 x i64> @intrinsic_viota_mask_m_nxv8i64_nxv8i1(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
index 4e4d78158f93..6be4e416b984 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \
 ; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
   <vscale x 1 x i64>*,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x i64> @intrinsic_vle_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
     <vscale x 1 x i64>* %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i64> @intrinsic_vle_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -51,7 +51,7 @@ define <vscale x 2 x i64> @intrinsic_vle_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
     <vscale x 2 x i64>* %0,
@@ -71,7 +71,7 @@ define <vscale x 2 x i64> @intrinsic_vle_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -91,7 +91,7 @@ define <vscale x 4 x i64> @intrinsic_vle_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
     <vscale x 4 x i64>* %0,
@@ -111,7 +111,7 @@ define <vscale x 4 x i64> @intrinsic_vle_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -131,7 +131,7 @@ define <vscale x 8 x i64> @intrinsic_vle_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
     <vscale x 8 x i64>* %0,
@@ -151,7 +151,7 @@ define <vscale x 8 x i64> @intrinsic_vle_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -171,7 +171,7 @@ define <vscale x 1 x double> @intrinsic_vle_v_nxv1f64_nxv1f64(<vscale x 1 x doub
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
     <vscale x 1 x double>* %0,
@@ -191,7 +191,7 @@ define <vscale x 1 x double> @intrinsic_vle_mask_v_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -211,7 +211,7 @@ define <vscale x 2 x double> @intrinsic_vle_v_nxv2f64_nxv2f64(<vscale x 2 x doub
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
     <vscale x 2 x double>* %0,
@@ -231,7 +231,7 @@ define <vscale x 2 x double> @intrinsic_vle_mask_v_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -251,7 +251,7 @@ define <vscale x 4 x double> @intrinsic_vle_v_nxv4f64_nxv4f64(<vscale x 4 x doub
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
     <vscale x 4 x double>* %0,
@@ -271,7 +271,7 @@ define <vscale x 4 x double> @intrinsic_vle_mask_v_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -291,7 +291,7 @@ define <vscale x 8 x double> @intrinsic_vle_v_nxv8f64_nxv8f64(<vscale x 8 x doub
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
     <vscale x 8 x double>* %0,
@@ -311,7 +311,7 @@ define <vscale x 8 x double> @intrinsic_vle_mask_v_nxv8f64_nxv8f64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -331,7 +331,7 @@ define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
     <vscale x 1 x i32>* %0,
@@ -351,7 +351,7 @@ define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -371,7 +371,7 @@ define <vscale x 2 x i32> @intrinsic_vle_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
     <vscale x 2 x i32>* %0,
@@ -391,7 +391,7 @@ define <vscale x 2 x i32> @intrinsic_vle_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -411,7 +411,7 @@ define <vscale x 4 x i32> @intrinsic_vle_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
     <vscale x 4 x i32>* %0,
@@ -431,7 +431,7 @@ define <vscale x 4 x i32> @intrinsic_vle_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -451,7 +451,7 @@ define <vscale x 8 x i32> @intrinsic_vle_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
     <vscale x 8 x i32>* %0,
@@ -471,7 +471,7 @@ define <vscale x 8 x i32> @intrinsic_vle_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -491,7 +491,7 @@ define <vscale x 16 x i32> @intrinsic_vle_v_nxv16i32_nxv16i32(<vscale x 16 x i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
     <vscale x 16 x i32>* %0,
@@ -511,7 +511,7 @@ define <vscale x 16 x i32> @intrinsic_vle_mask_v_nxv16i32_nxv16i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -531,7 +531,7 @@ define <vscale x 1 x float> @intrinsic_vle_v_nxv1f32_nxv1f32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
     <vscale x 1 x float>* %0,
@@ -551,7 +551,7 @@ define <vscale x 1 x float> @intrinsic_vle_mask_v_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -571,7 +571,7 @@ define <vscale x 2 x float> @intrinsic_vle_v_nxv2f32_nxv2f32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
     <vscale x 2 x float>* %0,
@@ -591,7 +591,7 @@ define <vscale x 2 x float> @intrinsic_vle_mask_v_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -611,7 +611,7 @@ define <vscale x 4 x float> @intrinsic_vle_v_nxv4f32_nxv4f32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
     <vscale x 4 x float>* %0,
@@ -631,7 +631,7 @@ define <vscale x 4 x float> @intrinsic_vle_mask_v_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -651,7 +651,7 @@ define <vscale x 8 x float> @intrinsic_vle_v_nxv8f32_nxv8f32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
     <vscale x 8 x float>* %0,
@@ -671,7 +671,7 @@ define <vscale x 8 x float> @intrinsic_vle_mask_v_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -691,7 +691,7 @@ define <vscale x 16 x float> @intrinsic_vle_v_nxv16f32_nxv16f32(<vscale x 16 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
     <vscale x 16 x float>* %0,
@@ -711,7 +711,7 @@ define <vscale x 16 x float> @intrinsic_vle_mask_v_nxv16f32_nxv16f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -731,7 +731,7 @@ define <vscale x 1 x i16> @intrinsic_vle_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
     <vscale x 1 x i16>* %0,
@@ -751,7 +751,7 @@ define <vscale x 1 x i16> @intrinsic_vle_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i16> @intrinsic_vle_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
     <vscale x 2 x i16>* %0,
@@ -791,7 +791,7 @@ define <vscale x 2 x i16> @intrinsic_vle_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -811,7 +811,7 @@ define <vscale x 4 x i16> @intrinsic_vle_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
     <vscale x 4 x i16>* %0,
@@ -831,7 +831,7 @@ define <vscale x 4 x i16> @intrinsic_vle_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -851,7 +851,7 @@ define <vscale x 8 x i16> @intrinsic_vle_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
     <vscale x 8 x i16>* %0,
@@ -871,7 +871,7 @@ define <vscale x 8 x i16> @intrinsic_vle_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -891,7 +891,7 @@ define <vscale x 16 x i16> @intrinsic_vle_v_nxv16i16_nxv16i16(<vscale x 16 x i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
     <vscale x 16 x i16>* %0,
@@ -911,7 +911,7 @@ define <vscale x 16 x i16> @intrinsic_vle_mask_v_nxv16i16_nxv16i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -931,7 +931,7 @@ define <vscale x 32 x i16> @intrinsic_vle_v_nxv32i16_nxv32i16(<vscale x 32 x i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
     <vscale x 32 x i16>* %0,
@@ -951,7 +951,7 @@ define <vscale x 32 x i16> @intrinsic_vle_mask_v_nxv32i16_nxv32i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -971,7 +971,7 @@ define <vscale x 1 x half> @intrinsic_vle_v_nxv1f16_nxv1f16(<vscale x 1 x half>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
     <vscale x 1 x half>* %0,
@@ -991,7 +991,7 @@ define <vscale x 1 x half> @intrinsic_vle_mask_v_nxv1f16_nxv1f16(<vscale x 1 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1011,7 +1011,7 @@ define <vscale x 2 x half> @intrinsic_vle_v_nxv2f16_nxv2f16(<vscale x 2 x half>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
     <vscale x 2 x half>* %0,
@@ -1031,7 +1031,7 @@ define <vscale x 2 x half> @intrinsic_vle_mask_v_nxv2f16_nxv2f16(<vscale x 2 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1051,7 +1051,7 @@ define <vscale x 4 x half> @intrinsic_vle_v_nxv4f16_nxv4f16(<vscale x 4 x half>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
     <vscale x 4 x half>* %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x half> @intrinsic_vle_mask_v_nxv4f16_nxv4f16(<vscale x 4 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1091,7 +1091,7 @@ define <vscale x 8 x half> @intrinsic_vle_v_nxv8f16_nxv8f16(<vscale x 8 x half>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
     <vscale x 8 x half>* %0,
@@ -1111,7 +1111,7 @@ define <vscale x 8 x half> @intrinsic_vle_mask_v_nxv8f16_nxv8f16(<vscale x 8 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1131,7 +1131,7 @@ define <vscale x 16 x half> @intrinsic_vle_v_nxv16f16_nxv16f16(<vscale x 16 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
     <vscale x 16 x half>* %0,
@@ -1151,7 +1151,7 @@ define <vscale x 16 x half> @intrinsic_vle_mask_v_nxv16f16_nxv16f16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1171,7 +1171,7 @@ define <vscale x 32 x half> @intrinsic_vle_v_nxv32f16_nxv32f16(<vscale x 32 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
     <vscale x 32 x half>* %0,
@@ -1191,7 +1191,7 @@ define <vscale x 32 x half> @intrinsic_vle_mask_v_nxv32f16_nxv32f16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1211,7 +1211,7 @@ define <vscale x 1 x i8> @intrinsic_vle_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
     <vscale x 1 x i8>* %0,
@@ -1231,7 +1231,7 @@ define <vscale x 1 x i8> @intrinsic_vle_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1251,7 +1251,7 @@ define <vscale x 2 x i8> @intrinsic_vle_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
     <vscale x 2 x i8>* %0,
@@ -1271,7 +1271,7 @@ define <vscale x 2 x i8> @intrinsic_vle_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 4 x i8> @intrinsic_vle_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
     <vscale x 4 x i8>* %0,
@@ -1311,7 +1311,7 @@ define <vscale x 4 x i8> @intrinsic_vle_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1331,7 +1331,7 @@ define <vscale x 8 x i8> @intrinsic_vle_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
     <vscale x 8 x i8>* %0,
@@ -1351,7 +1351,7 @@ define <vscale x 8 x i8> @intrinsic_vle_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1371,7 +1371,7 @@ define <vscale x 16 x i8> @intrinsic_vle_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
     <vscale x 16 x i8>* %0,
@@ -1391,7 +1391,7 @@ define <vscale x 16 x i8> @intrinsic_vle_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1411,7 +1411,7 @@ define <vscale x 32 x i8> @intrinsic_vle_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
     <vscale x 32 x i8>* %0,
@@ -1431,7 +1431,7 @@ define <vscale x 32 x i8> @intrinsic_vle_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1451,7 +1451,7 @@ define <vscale x 64 x i8> @intrinsic_vle_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
     <vscale x 64 x i8>* %0,
@@ -1471,7 +1471,7 @@ define <vscale x 64 x i8> @intrinsic_vle_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
     <vscale x 64 x i8> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
index abf369442cf7..87ef0d9c1436 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \
 ; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
   <vscale x 1 x i64>*,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x i64> @intrinsic_vle_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
     <vscale x 1 x i64>* %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i64> @intrinsic_vle_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -51,7 +51,7 @@ define <vscale x 2 x i64> @intrinsic_vle_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
     <vscale x 2 x i64>* %0,
@@ -71,7 +71,7 @@ define <vscale x 2 x i64> @intrinsic_vle_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -91,7 +91,7 @@ define <vscale x 4 x i64> @intrinsic_vle_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
     <vscale x 4 x i64>* %0,
@@ -111,7 +111,7 @@ define <vscale x 4 x i64> @intrinsic_vle_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -131,7 +131,7 @@ define <vscale x 8 x i64> @intrinsic_vle_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
     <vscale x 8 x i64>* %0,
@@ -151,7 +151,7 @@ define <vscale x 8 x i64> @intrinsic_vle_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -171,7 +171,7 @@ define <vscale x 1 x double> @intrinsic_vle_v_nxv1f64_nxv1f64(<vscale x 1 x doub
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
     <vscale x 1 x double>* %0,
@@ -191,7 +191,7 @@ define <vscale x 1 x double> @intrinsic_vle_mask_v_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -211,7 +211,7 @@ define <vscale x 2 x double> @intrinsic_vle_v_nxv2f64_nxv2f64(<vscale x 2 x doub
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
     <vscale x 2 x double>* %0,
@@ -231,7 +231,7 @@ define <vscale x 2 x double> @intrinsic_vle_mask_v_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -251,7 +251,7 @@ define <vscale x 4 x double> @intrinsic_vle_v_nxv4f64_nxv4f64(<vscale x 4 x doub
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
     <vscale x 4 x double>* %0,
@@ -271,7 +271,7 @@ define <vscale x 4 x double> @intrinsic_vle_mask_v_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -291,7 +291,7 @@ define <vscale x 8 x double> @intrinsic_vle_v_nxv8f64_nxv8f64(<vscale x 8 x doub
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
     <vscale x 8 x double>* %0,
@@ -311,7 +311,7 @@ define <vscale x 8 x double> @intrinsic_vle_mask_v_nxv8f64_nxv8f64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -331,7 +331,7 @@ define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
     <vscale x 1 x i32>* %0,
@@ -351,7 +351,7 @@ define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -371,7 +371,7 @@ define <vscale x 2 x i32> @intrinsic_vle_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
     <vscale x 2 x i32>* %0,
@@ -391,7 +391,7 @@ define <vscale x 2 x i32> @intrinsic_vle_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -411,7 +411,7 @@ define <vscale x 4 x i32> @intrinsic_vle_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
     <vscale x 4 x i32>* %0,
@@ -431,7 +431,7 @@ define <vscale x 4 x i32> @intrinsic_vle_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -451,7 +451,7 @@ define <vscale x 8 x i32> @intrinsic_vle_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
     <vscale x 8 x i32>* %0,
@@ -471,7 +471,7 @@ define <vscale x 8 x i32> @intrinsic_vle_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -491,7 +491,7 @@ define <vscale x 16 x i32> @intrinsic_vle_v_nxv16i32_nxv16i32(<vscale x 16 x i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
     <vscale x 16 x i32>* %0,
@@ -511,7 +511,7 @@ define <vscale x 16 x i32> @intrinsic_vle_mask_v_nxv16i32_nxv16i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -531,7 +531,7 @@ define <vscale x 1 x float> @intrinsic_vle_v_nxv1f32_nxv1f32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
     <vscale x 1 x float>* %0,
@@ -551,7 +551,7 @@ define <vscale x 1 x float> @intrinsic_vle_mask_v_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -571,7 +571,7 @@ define <vscale x 2 x float> @intrinsic_vle_v_nxv2f32_nxv2f32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
     <vscale x 2 x float>* %0,
@@ -591,7 +591,7 @@ define <vscale x 2 x float> @intrinsic_vle_mask_v_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -611,7 +611,7 @@ define <vscale x 4 x float> @intrinsic_vle_v_nxv4f32_nxv4f32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
     <vscale x 4 x float>* %0,
@@ -631,7 +631,7 @@ define <vscale x 4 x float> @intrinsic_vle_mask_v_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -651,7 +651,7 @@ define <vscale x 8 x float> @intrinsic_vle_v_nxv8f32_nxv8f32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
     <vscale x 8 x float>* %0,
@@ -671,7 +671,7 @@ define <vscale x 8 x float> @intrinsic_vle_mask_v_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -691,7 +691,7 @@ define <vscale x 16 x float> @intrinsic_vle_v_nxv16f32_nxv16f32(<vscale x 16 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
     <vscale x 16 x float>* %0,
@@ -711,7 +711,7 @@ define <vscale x 16 x float> @intrinsic_vle_mask_v_nxv16f32_nxv16f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -731,7 +731,7 @@ define <vscale x 1 x i16> @intrinsic_vle_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
     <vscale x 1 x i16>* %0,
@@ -751,7 +751,7 @@ define <vscale x 1 x i16> @intrinsic_vle_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i16> @intrinsic_vle_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
     <vscale x 2 x i16>* %0,
@@ -791,7 +791,7 @@ define <vscale x 2 x i16> @intrinsic_vle_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -811,7 +811,7 @@ define <vscale x 4 x i16> @intrinsic_vle_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
     <vscale x 4 x i16>* %0,
@@ -831,7 +831,7 @@ define <vscale x 4 x i16> @intrinsic_vle_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -851,7 +851,7 @@ define <vscale x 8 x i16> @intrinsic_vle_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
     <vscale x 8 x i16>* %0,
@@ -871,7 +871,7 @@ define <vscale x 8 x i16> @intrinsic_vle_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -891,7 +891,7 @@ define <vscale x 16 x i16> @intrinsic_vle_v_nxv16i16_nxv16i16(<vscale x 16 x i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
     <vscale x 16 x i16>* %0,
@@ -911,7 +911,7 @@ define <vscale x 16 x i16> @intrinsic_vle_mask_v_nxv16i16_nxv16i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -931,7 +931,7 @@ define <vscale x 32 x i16> @intrinsic_vle_v_nxv32i16_nxv32i16(<vscale x 32 x i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
     <vscale x 32 x i16>* %0,
@@ -951,7 +951,7 @@ define <vscale x 32 x i16> @intrinsic_vle_mask_v_nxv32i16_nxv32i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -971,7 +971,7 @@ define <vscale x 1 x half> @intrinsic_vle_v_nxv1f16_nxv1f16(<vscale x 1 x half>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
     <vscale x 1 x half>* %0,
@@ -991,7 +991,7 @@ define <vscale x 1 x half> @intrinsic_vle_mask_v_nxv1f16_nxv1f16(<vscale x 1 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1011,7 +1011,7 @@ define <vscale x 2 x half> @intrinsic_vle_v_nxv2f16_nxv2f16(<vscale x 2 x half>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
     <vscale x 2 x half>* %0,
@@ -1031,7 +1031,7 @@ define <vscale x 2 x half> @intrinsic_vle_mask_v_nxv2f16_nxv2f16(<vscale x 2 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1051,7 +1051,7 @@ define <vscale x 4 x half> @intrinsic_vle_v_nxv4f16_nxv4f16(<vscale x 4 x half>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
     <vscale x 4 x half>* %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x half> @intrinsic_vle_mask_v_nxv4f16_nxv4f16(<vscale x 4 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1091,7 +1091,7 @@ define <vscale x 8 x half> @intrinsic_vle_v_nxv8f16_nxv8f16(<vscale x 8 x half>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
     <vscale x 8 x half>* %0,
@@ -1111,7 +1111,7 @@ define <vscale x 8 x half> @intrinsic_vle_mask_v_nxv8f16_nxv8f16(<vscale x 8 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1131,7 +1131,7 @@ define <vscale x 16 x half> @intrinsic_vle_v_nxv16f16_nxv16f16(<vscale x 16 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
     <vscale x 16 x half>* %0,
@@ -1151,7 +1151,7 @@ define <vscale x 16 x half> @intrinsic_vle_mask_v_nxv16f16_nxv16f16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1171,7 +1171,7 @@ define <vscale x 32 x half> @intrinsic_vle_v_nxv32f16_nxv32f16(<vscale x 32 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
     <vscale x 32 x half>* %0,
@@ -1191,7 +1191,7 @@ define <vscale x 32 x half> @intrinsic_vle_mask_v_nxv32f16_nxv32f16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1211,7 +1211,7 @@ define <vscale x 1 x i8> @intrinsic_vle_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
     <vscale x 1 x i8>* %0,
@@ -1231,7 +1231,7 @@ define <vscale x 1 x i8> @intrinsic_vle_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1251,7 +1251,7 @@ define <vscale x 2 x i8> @intrinsic_vle_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
     <vscale x 2 x i8>* %0,
@@ -1271,7 +1271,7 @@ define <vscale x 2 x i8> @intrinsic_vle_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 4 x i8> @intrinsic_vle_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
     <vscale x 4 x i8>* %0,
@@ -1311,7 +1311,7 @@ define <vscale x 4 x i8> @intrinsic_vle_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1331,7 +1331,7 @@ define <vscale x 8 x i8> @intrinsic_vle_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
     <vscale x 8 x i8>* %0,
@@ -1351,7 +1351,7 @@ define <vscale x 8 x i8> @intrinsic_vle_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1371,7 +1371,7 @@ define <vscale x 16 x i8> @intrinsic_vle_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
     <vscale x 16 x i8>* %0,
@@ -1391,7 +1391,7 @@ define <vscale x 16 x i8> @intrinsic_vle_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1411,7 +1411,7 @@ define <vscale x 32 x i8> @intrinsic_vle_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
     <vscale x 32 x i8>* %0,
@@ -1431,7 +1431,7 @@ define <vscale x 32 x i8> @intrinsic_vle_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1451,7 +1451,7 @@ define <vscale x 64 x i8> @intrinsic_vle_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
     <vscale x 64 x i8>* %0,
@@ -1471,7 +1471,7 @@ define <vscale x 64 x i8> @intrinsic_vle_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
     <vscale x 64 x i8> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vle1-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vle1-rv32.ll
index f7040f7885d5..7688c4bb3557 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vle1-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vle1-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 
 declare <vscale x 1 x i1> @llvm.riscv.vle1.nxv1i1(<vscale x 1 x i1>*, i32);
 
@@ -9,7 +9,7 @@ define <vscale x 1 x i1> @intrinsic_vle1_v_nxv1i1(<vscale x 1 x i1>* %0, i32 %1)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vle1.nxv1i1(<vscale x 1 x i1>* %0, i32 %1)
   ret <vscale x 1 x i1> %a
@@ -22,7 +22,7 @@ define <vscale x 2 x i1> @intrinsic_vle1_v_nxv2i1(<vscale x 2 x i1>* %0, i32 %1)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vle1.nxv2i1(<vscale x 2 x i1>* %0, i32 %1)
   ret <vscale x 2 x i1> %a
@@ -35,7 +35,7 @@ define <vscale x 4 x i1> @intrinsic_vle1_v_nxv4i1(<vscale x 4 x i1>* %0, i32 %1)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vle1.nxv4i1(<vscale x 4 x i1>* %0, i32 %1)
   ret <vscale x 4 x i1> %a
@@ -48,7 +48,7 @@ define <vscale x 8 x i1> @intrinsic_vle1_v_nxv8i1(<vscale x 8 x i1>* %0, i32 %1)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vle1.nxv8i1(<vscale x 8 x i1>* %0, i32 %1)
   ret <vscale x 8 x i1> %a
@@ -61,7 +61,7 @@ define <vscale x 16 x i1> @intrinsic_vle1_v_nxv16i1(<vscale x 16 x i1>* %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vle1.nxv16i1(<vscale x 16 x i1>* %0, i32 %1)
   ret <vscale x 16 x i1> %a
@@ -74,7 +74,7 @@ define <vscale x 32 x i1> @intrinsic_vle1_v_nxv32i1(<vscale x 32 x i1>* %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vle1.nxv32i1(<vscale x 32 x i1>* %0, i32 %1)
   ret <vscale x 32 x i1> %a
@@ -87,7 +87,7 @@ define <vscale x 64 x i1> @intrinsic_vle1_v_nxv64i1(<vscale x 64 x i1>* %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vle1.nxv64i1(<vscale x 64 x i1>* %0, i32 %1)
   ret <vscale x 64 x i1> %a

diff --git a/llvm/test/CodeGen/RISCV/rvv/vle1-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vle1-rv64.ll
index 46c91f5f6b39..bc9f7df914e0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vle1-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vle1-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 
 declare <vscale x 1 x i1> @llvm.riscv.vle1.nxv1i1(<vscale x 1 x i1>*, i64);
 
@@ -9,7 +9,7 @@ define <vscale x 1 x i1> @intrinsic_vle1_v_nxv1i1(<vscale x 1 x i1>* %0, i64 %1)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vle1.nxv1i1(<vscale x 1 x i1>* %0, i64 %1)
   ret <vscale x 1 x i1> %a
@@ -22,7 +22,7 @@ define <vscale x 2 x i1> @intrinsic_vle1_v_nxv2i1(<vscale x 2 x i1>* %0, i64 %1)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vle1.nxv2i1(<vscale x 2 x i1>* %0, i64 %1)
   ret <vscale x 2 x i1> %a
@@ -35,7 +35,7 @@ define <vscale x 4 x i1> @intrinsic_vle1_v_nxv4i1(<vscale x 4 x i1>* %0, i64 %1)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vle1.nxv4i1(<vscale x 4 x i1>* %0, i64 %1)
   ret <vscale x 4 x i1> %a
@@ -48,7 +48,7 @@ define <vscale x 8 x i1> @intrinsic_vle1_v_nxv8i1(<vscale x 8 x i1>* %0, i64 %1)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vle1.nxv8i1(<vscale x 8 x i1>* %0, i64 %1)
   ret <vscale x 8 x i1> %a
@@ -61,7 +61,7 @@ define <vscale x 16 x i1> @intrinsic_vle1_v_nxv16i1(<vscale x 16 x i1>* %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vle1.nxv16i1(<vscale x 16 x i1>* %0, i64 %1)
   ret <vscale x 16 x i1> %a
@@ -74,7 +74,7 @@ define <vscale x 32 x i1> @intrinsic_vle1_v_nxv32i1(<vscale x 32 x i1>* %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vle1.nxv32i1(<vscale x 32 x i1>* %0, i64 %1)
   ret <vscale x 32 x i1> %a
@@ -87,7 +87,7 @@ define <vscale x 64 x i1> @intrinsic_vle1_v_nxv64i1(<vscale x 64 x i1>* %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vle1.nxv64i1(<vscale x 64 x i1>* %0, i64 %1)
   ret <vscale x 64 x i1> %a

diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
index 485e363a8ea8..9feafc48269d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>*,
   <vscale x 1 x i64>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8>* %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8>* %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8>* %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8>* %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16>* %0,
@@ -214,7 +214,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16>* %0,
@@ -259,7 +259,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16>* %0,
@@ -304,7 +304,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -327,7 +327,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16>* %0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -372,7 +372,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32>* %0,
@@ -394,7 +394,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32>* %0,
@@ -439,7 +439,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32>* %0,
@@ -484,7 +484,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32>* %0,
@@ -529,7 +529,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -551,7 +551,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64>* %0,
@@ -573,7 +573,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -595,7 +595,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64>* %0,
@@ -617,7 +617,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -639,7 +639,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64>* %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -683,7 +683,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64>* %0,
@@ -705,7 +705,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -728,7 +728,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64(
     <vscale x 1 x half>* %0,
@@ -750,7 +750,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -773,7 +773,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64(
     <vscale x 2 x half>* %0,
@@ -795,7 +795,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -818,7 +818,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64(
     <vscale x 4 x half>* %0,
@@ -840,7 +840,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -863,7 +863,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64(
     <vscale x 8 x half>* %0,
@@ -885,7 +885,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -908,7 +908,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
     <vscale x 1 x float>* %0,
@@ -930,7 +930,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -953,7 +953,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64(
     <vscale x 2 x float>* %0,
@@ -975,7 +975,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -998,7 +998,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64(
     <vscale x 4 x float>* %0,
@@ -1020,7 +1020,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64(
     <vscale x 8 x float>* %0,
@@ -1065,7 +1065,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1087,7 +1087,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64(
     <vscale x 1 x double>* %0,
@@ -1109,7 +1109,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1131,7 +1131,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64(
     <vscale x 2 x double>* %0,
@@ -1153,7 +1153,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1175,7 +1175,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64(
     <vscale x 4 x double>* %0,
@@ -1197,7 +1197,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1219,7 +1219,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64(
     <vscale x 8 x double>* %0,
@@ -1241,7 +1241,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1264,7 +1264,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8>* %0,
@@ -1286,7 +1286,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8>* %0,
@@ -1331,7 +1331,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1354,7 +1354,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8>* %0,
@@ -1376,7 +1376,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1399,7 +1399,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8>* %0,
@@ -1421,7 +1421,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1444,7 +1444,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8>* %0,
@@ -1466,7 +1466,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16>* %0,
@@ -1511,7 +1511,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1534,7 +1534,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16>* %0,
@@ -1556,7 +1556,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16>* %0,
@@ -1601,7 +1601,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1624,7 +1624,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16>* %0,
@@ -1646,7 +1646,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1669,7 +1669,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16>* %0,
@@ -1691,7 +1691,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1713,7 +1713,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32>* %0,
@@ -1735,7 +1735,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1757,7 +1757,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32>* %0,
@@ -1779,7 +1779,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1801,7 +1801,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32>* %0,
@@ -1823,7 +1823,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1845,7 +1845,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32>* %0,
@@ -1867,7 +1867,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1889,7 +1889,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32>* %0,
@@ -1911,7 +1911,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1934,7 +1934,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64>* %0,
@@ -1956,7 +1956,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -1979,7 +1979,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64>* %0,
@@ -2001,7 +2001,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2024,7 +2024,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64>* %0,
@@ -2046,7 +2046,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2069,7 +2069,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64>* %0,
@@ -2091,7 +2091,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2114,7 +2114,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
     <vscale x 1 x half>* %0,
@@ -2136,7 +2136,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2159,7 +2159,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
     <vscale x 2 x half>* %0,
@@ -2181,7 +2181,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2204,7 +2204,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
     <vscale x 4 x half>* %0,
@@ -2226,7 +2226,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
     <vscale x 8 x half>* %0,
@@ -2271,7 +2271,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2294,7 +2294,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
     <vscale x 16 x half>* %0,
@@ -2316,7 +2316,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2338,7 +2338,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
     <vscale x 1 x float>* %0,
@@ -2360,7 +2360,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2382,7 +2382,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
     <vscale x 2 x float>* %0,
@@ -2404,7 +2404,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
     <vscale x 4 x float>* %0,
@@ -2448,7 +2448,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2470,7 +2470,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
     <vscale x 8 x float>* %0,
@@ -2492,7 +2492,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2514,7 +2514,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
     <vscale x 16 x float>* %0,
@@ -2536,7 +2536,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2559,7 +2559,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
     <vscale x 1 x double>* %0,
@@ -2581,7 +2581,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2604,7 +2604,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
     <vscale x 2 x double>* %0,
@@ -2626,7 +2626,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2649,7 +2649,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
     <vscale x 4 x double>* %0,
@@ -2671,7 +2671,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2694,7 +2694,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
     <vscale x 8 x double>* %0,
@@ -2716,7 +2716,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2739,7 +2739,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8>* %0,
@@ -2761,7 +2761,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2784,7 +2784,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8>* %0,
@@ -2806,7 +2806,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2829,7 +2829,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8>* %0,
@@ -2851,7 +2851,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2874,7 +2874,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8>* %0,
@@ -2896,7 +2896,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2919,7 +2919,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8>* %0,
@@ -2941,7 +2941,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -2964,7 +2964,7 @@ define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8>* %0,
@@ -2986,7 +2986,7 @@ define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3008,7 +3008,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16>* %0,
@@ -3030,7 +3030,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3052,7 +3052,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16>* %0,
@@ -3074,7 +3074,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3096,7 +3096,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16>* %0,
@@ -3118,7 +3118,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3140,7 +3140,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16>* %0,
@@ -3162,7 +3162,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3184,7 +3184,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16>* %0,
@@ -3206,7 +3206,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3228,7 +3228,7 @@ define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16>* %0,
@@ -3250,7 +3250,7 @@ define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3273,7 +3273,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32>* %0,
@@ -3295,7 +3295,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3318,7 +3318,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32>* %0,
@@ -3340,7 +3340,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3363,7 +3363,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32>* %0,
@@ -3385,7 +3385,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3408,7 +3408,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32>* %0,
@@ -3430,7 +3430,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3453,7 +3453,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32>* %0,
@@ -3475,7 +3475,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3498,7 +3498,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64>* %0,
@@ -3520,7 +3520,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3543,7 +3543,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64>* %0,
@@ -3565,7 +3565,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3588,7 +3588,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64>* %0,
@@ -3610,7 +3610,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3633,7 +3633,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64>* %0,
@@ -3655,7 +3655,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3677,7 +3677,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
     <vscale x 1 x half>* %0,
@@ -3699,7 +3699,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3721,7 +3721,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
     <vscale x 2 x half>* %0,
@@ -3743,7 +3743,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3765,7 +3765,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
     <vscale x 4 x half>* %0,
@@ -3787,7 +3787,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3809,7 +3809,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
     <vscale x 8 x half>* %0,
@@ -3831,7 +3831,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3853,7 +3853,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
     <vscale x 16 x half>* %0,
@@ -3875,7 +3875,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3897,7 +3897,7 @@ define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
     <vscale x 32 x half>* %0,
@@ -3919,7 +3919,7 @@ define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -3942,7 +3942,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
     <vscale x 1 x float>* %0,
@@ -3964,7 +3964,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -3987,7 +3987,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
     <vscale x 2 x float>* %0,
@@ -4009,7 +4009,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4032,7 +4032,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
     <vscale x 4 x float>* %0,
@@ -4054,7 +4054,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4077,7 +4077,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
     <vscale x 8 x float>* %0,
@@ -4099,7 +4099,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4122,7 +4122,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(<vsc
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
     <vscale x 16 x float>* %0,
@@ -4144,7 +4144,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4167,7 +4167,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
     <vscale x 1 x double>* %0,
@@ -4189,7 +4189,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4212,7 +4212,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
     <vscale x 2 x double>* %0,
@@ -4234,7 +4234,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4257,7 +4257,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
     <vscale x 4 x double>* %0,
@@ -4279,7 +4279,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4302,7 +4302,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
     <vscale x 8 x double>* %0,
@@ -4324,7 +4324,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4346,7 +4346,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8>* %0,
@@ -4368,7 +4368,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4390,7 +4390,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8>* %0,
@@ -4412,7 +4412,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4434,7 +4434,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8>* %0,
@@ -4456,7 +4456,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4478,7 +4478,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8>* %0,
@@ -4500,7 +4500,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4522,7 +4522,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8>* %0,
@@ -4544,7 +4544,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4566,7 +4566,7 @@ define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8>* %0,
@@ -4588,7 +4588,7 @@ define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4610,7 +4610,7 @@ define <vscale x 64 x i8> @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8>* %0,
@@ -4632,7 +4632,7 @@ define <vscale x 64 x i8> @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4655,7 +4655,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16>* %0,
@@ -4677,7 +4677,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4700,7 +4700,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16>* %0,
@@ -4722,7 +4722,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4745,7 +4745,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16>* %0,
@@ -4767,7 +4767,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4790,7 +4790,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16>* %0,
@@ -4812,7 +4812,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4835,7 +4835,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16>* %0,
@@ -4857,7 +4857,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -4880,7 +4880,7 @@ define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16>* %0,
@@ -4902,7 +4902,7 @@ define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -4925,7 +4925,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32>* %0,
@@ -4947,7 +4947,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -4970,7 +4970,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32>* %0,
@@ -4992,7 +4992,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5015,7 +5015,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32>* %0,
@@ -5037,7 +5037,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5060,7 +5060,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32>* %0,
@@ -5082,7 +5082,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5105,7 +5105,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32>* %0,
@@ -5127,7 +5127,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5150,7 +5150,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64>* %0,
@@ -5172,7 +5172,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5195,7 +5195,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64>* %0,
@@ -5217,7 +5217,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5240,7 +5240,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64>* %0,
@@ -5262,7 +5262,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5285,7 +5285,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64>* %0,
@@ -5307,7 +5307,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5330,7 +5330,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
     <vscale x 1 x half>* %0,
@@ -5352,7 +5352,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5375,7 +5375,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
     <vscale x 2 x half>* %0,
@@ -5397,7 +5397,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5420,7 +5420,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
     <vscale x 4 x half>* %0,
@@ -5442,7 +5442,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5465,7 +5465,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
     <vscale x 8 x half>* %0,
@@ -5487,7 +5487,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5510,7 +5510,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
     <vscale x 16 x half>* %0,
@@ -5532,7 +5532,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5555,7 +5555,7 @@ define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
     <vscale x 32 x half>* %0,
@@ -5577,7 +5577,7 @@ define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5600,7 +5600,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
     <vscale x 1 x float>* %0,
@@ -5622,7 +5622,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5645,7 +5645,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
     <vscale x 2 x float>* %0,
@@ -5667,7 +5667,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5690,7 +5690,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
     <vscale x 4 x float>* %0,
@@ -5712,7 +5712,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5735,7 +5735,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
     <vscale x 8 x float>* %0,
@@ -5757,7 +5757,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5780,7 +5780,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
     <vscale x 16 x float>* %0,
@@ -5802,7 +5802,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5825,7 +5825,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
     <vscale x 1 x double>* %0,
@@ -5847,7 +5847,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -5870,7 +5870,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
     <vscale x 2 x double>* %0,
@@ -5892,7 +5892,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -5915,7 +5915,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
     <vscale x 4 x double>* %0,
@@ -5937,7 +5937,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -5960,7 +5960,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
     <vscale x 8 x double>* %0,
@@ -5982,7 +5982,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,

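(Editor's note, not part of the commit: the hunks above and below follow one mechanical pattern — drop --riscv-no-aliases from the RUN lines and regenerate the CHECK lines with utils/update_llc_test_checks.py, which the tests name as their generator; regeneration is what turns the "jalr zero, 0(ra)" expectations into "ret". The following Python sketch shows how the RUN-line half of such a cleanup could plausibly be staged; the script name, paths, and exact whitespace handling are assumptions, not the author's tooling.)

    #!/usr/bin/env python3
    """Hypothetical helper (sketch): strip --riscv-no-aliases from RUN lines.

    After rewriting the RUN lines, the CHECK lines would still need to be
    regenerated with llvm/utils/update_llc_test_checks.py on the same files.
    """
    import re
    import sys
    from pathlib import Path

    # Match the flag plus one trailing space so the rest of the RUN line
    # keeps its original spacing.
    FLAG = re.compile(r"--riscv-no-aliases ?")

    def strip_flag(path: Path) -> bool:
        """Remove the flag from every '; RUN:' line; return True if the file changed."""
        original = path.read_text()
        rewritten = []
        for line in original.splitlines(keepends=True):
            if line.lstrip().startswith("; RUN:"):
                line = FLAG.sub("", line)
            rewritten.append(line)
        updated = "".join(rewritten)
        if updated != original:
            path.write_text(updated)
            return True
        return False

    if __name__ == "__main__":
        # Example invocation (paths are an assumption):
        #   strip_rvv_aliases.py llvm/test/CodeGen/RISCV/rvv/*.ll
        changed = [p for p in map(Path, sys.argv[1:]) if strip_flag(p)]
        print(f"rewrote {len(changed)} file(s); now rerun "
              "llvm/utils/update_llc_test_checks.py on them")
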
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
index 05204e671f2c..c58373247e7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>*,
   <vscale x 1 x i64>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8>* %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8>* %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8>* %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8>* %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16>* %0,
@@ -214,7 +214,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16>* %0,
@@ -259,7 +259,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16>* %0,
@@ -304,7 +304,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -327,7 +327,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16>* %0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -372,7 +372,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32>* %0,
@@ -394,7 +394,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32>* %0,
@@ -439,7 +439,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32>* %0,
@@ -484,7 +484,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32>* %0,
@@ -529,7 +529,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -551,7 +551,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64>* %0,
@@ -573,7 +573,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -595,7 +595,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64>* %0,
@@ -617,7 +617,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -639,7 +639,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64>* %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -683,7 +683,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64>* %0,
@@ -705,7 +705,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -728,7 +728,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64(
     <vscale x 1 x half>* %0,
@@ -750,7 +750,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -773,7 +773,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64(
     <vscale x 2 x half>* %0,
@@ -795,7 +795,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -818,7 +818,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64(
     <vscale x 4 x half>* %0,
@@ -840,7 +840,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -863,7 +863,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64(
     <vscale x 8 x half>* %0,
@@ -885,7 +885,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -908,7 +908,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
     <vscale x 1 x float>* %0,
@@ -930,7 +930,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -953,7 +953,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64(
     <vscale x 2 x float>* %0,
@@ -975,7 +975,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -998,7 +998,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64(
     <vscale x 4 x float>* %0,
@@ -1020,7 +1020,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64(
     <vscale x 8 x float>* %0,
@@ -1065,7 +1065,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1087,7 +1087,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64(
     <vscale x 1 x double>* %0,
@@ -1109,7 +1109,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1131,7 +1131,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64(
     <vscale x 2 x double>* %0,
@@ -1153,7 +1153,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1175,7 +1175,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64(
     <vscale x 4 x double>* %0,
@@ -1197,7 +1197,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1219,7 +1219,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64(
     <vscale x 8 x double>* %0,
@@ -1241,7 +1241,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1264,7 +1264,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8>* %0,
@@ -1286,7 +1286,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8>* %0,
@@ -1331,7 +1331,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1354,7 +1354,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8>* %0,
@@ -1376,7 +1376,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1399,7 +1399,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8>* %0,
@@ -1421,7 +1421,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1444,7 +1444,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8>* %0,
@@ -1466,7 +1466,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16>* %0,
@@ -1511,7 +1511,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1534,7 +1534,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16>* %0,
@@ -1556,7 +1556,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16>* %0,
@@ -1601,7 +1601,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1624,7 +1624,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16>* %0,
@@ -1646,7 +1646,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1669,7 +1669,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16>* %0,
@@ -1691,7 +1691,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1713,7 +1713,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32>* %0,
@@ -1735,7 +1735,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1757,7 +1757,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32>* %0,
@@ -1779,7 +1779,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1801,7 +1801,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32>* %0,
@@ -1823,7 +1823,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1845,7 +1845,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32>* %0,
@@ -1867,7 +1867,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1889,7 +1889,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32>* %0,
@@ -1911,7 +1911,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1934,7 +1934,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64>* %0,
@@ -1956,7 +1956,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -1979,7 +1979,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64>* %0,
@@ -2001,7 +2001,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2024,7 +2024,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64>* %0,
@@ -2046,7 +2046,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2069,7 +2069,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64>* %0,
@@ -2091,7 +2091,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2114,7 +2114,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
     <vscale x 1 x half>* %0,
@@ -2136,7 +2136,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2159,7 +2159,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
     <vscale x 2 x half>* %0,
@@ -2181,7 +2181,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2204,7 +2204,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
     <vscale x 4 x half>* %0,
@@ -2226,7 +2226,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
     <vscale x 8 x half>* %0,
@@ -2271,7 +2271,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2294,7 +2294,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
     <vscale x 16 x half>* %0,
@@ -2316,7 +2316,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2338,7 +2338,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
     <vscale x 1 x float>* %0,
@@ -2360,7 +2360,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2382,7 +2382,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
     <vscale x 2 x float>* %0,
@@ -2404,7 +2404,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
     <vscale x 4 x float>* %0,
@@ -2448,7 +2448,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2470,7 +2470,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
     <vscale x 8 x float>* %0,
@@ -2492,7 +2492,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2514,7 +2514,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
     <vscale x 16 x float>* %0,
@@ -2536,7 +2536,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2559,7 +2559,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
     <vscale x 1 x double>* %0,
@@ -2581,7 +2581,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2604,7 +2604,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
     <vscale x 2 x double>* %0,
@@ -2626,7 +2626,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2649,7 +2649,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
     <vscale x 4 x double>* %0,
@@ -2671,7 +2671,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2694,7 +2694,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
     <vscale x 8 x double>* %0,
@@ -2716,7 +2716,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2739,7 +2739,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8>* %0,
@@ -2761,7 +2761,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2784,7 +2784,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8>* %0,
@@ -2806,7 +2806,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2829,7 +2829,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8>* %0,
@@ -2851,7 +2851,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2874,7 +2874,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8>* %0,
@@ -2896,7 +2896,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2919,7 +2919,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8>* %0,
@@ -2941,7 +2941,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -2964,7 +2964,7 @@ define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8>* %0,
@@ -2986,7 +2986,7 @@ define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3008,7 +3008,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16>* %0,
@@ -3030,7 +3030,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3052,7 +3052,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16>* %0,
@@ -3074,7 +3074,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3096,7 +3096,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16>* %0,
@@ -3118,7 +3118,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3140,7 +3140,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16>* %0,
@@ -3162,7 +3162,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3184,7 +3184,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16>* %0,
@@ -3206,7 +3206,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3228,7 +3228,7 @@ define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16>* %0,
@@ -3250,7 +3250,7 @@ define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3273,7 +3273,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32>* %0,
@@ -3295,7 +3295,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3318,7 +3318,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32>* %0,
@@ -3340,7 +3340,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3363,7 +3363,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32>* %0,
@@ -3385,7 +3385,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3408,7 +3408,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32>* %0,
@@ -3430,7 +3430,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3453,7 +3453,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32>* %0,
@@ -3475,7 +3475,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3498,7 +3498,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64>* %0,
@@ -3520,7 +3520,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3543,7 +3543,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64>* %0,
@@ -3565,7 +3565,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3588,7 +3588,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64>* %0,
@@ -3610,7 +3610,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3633,7 +3633,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64>* %0,
@@ -3655,7 +3655,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3677,7 +3677,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
     <vscale x 1 x half>* %0,
@@ -3699,7 +3699,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3721,7 +3721,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
     <vscale x 2 x half>* %0,
@@ -3743,7 +3743,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3765,7 +3765,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
     <vscale x 4 x half>* %0,
@@ -3787,7 +3787,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3809,7 +3809,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
     <vscale x 8 x half>* %0,
@@ -3831,7 +3831,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3853,7 +3853,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
     <vscale x 16 x half>* %0,
@@ -3875,7 +3875,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3897,7 +3897,7 @@ define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
     <vscale x 32 x half>* %0,
@@ -3919,7 +3919,7 @@ define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -3942,7 +3942,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
     <vscale x 1 x float>* %0,
@@ -3964,7 +3964,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -3987,7 +3987,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
     <vscale x 2 x float>* %0,
@@ -4009,7 +4009,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4032,7 +4032,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
     <vscale x 4 x float>* %0,
@@ -4054,7 +4054,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4077,7 +4077,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
     <vscale x 8 x float>* %0,
@@ -4099,7 +4099,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4122,7 +4122,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(<vsc
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
     <vscale x 16 x float>* %0,
@@ -4144,7 +4144,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4167,7 +4167,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
     <vscale x 1 x double>* %0,
@@ -4189,7 +4189,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4212,7 +4212,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
     <vscale x 2 x double>* %0,
@@ -4234,7 +4234,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4257,7 +4257,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
     <vscale x 4 x double>* %0,
@@ -4279,7 +4279,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4302,7 +4302,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
     <vscale x 8 x double>* %0,
@@ -4324,7 +4324,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4346,7 +4346,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8>* %0,
@@ -4368,7 +4368,7 @@ define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4390,7 +4390,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8>* %0,
@@ -4412,7 +4412,7 @@ define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4434,7 +4434,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8>* %0,
@@ -4456,7 +4456,7 @@ define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4478,7 +4478,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8>* %0,
@@ -4500,7 +4500,7 @@ define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4522,7 +4522,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8>* %0,
@@ -4544,7 +4544,7 @@ define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4566,7 +4566,7 @@ define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8>* %0,
@@ -4588,7 +4588,7 @@ define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4610,7 +4610,7 @@ define <vscale x 64 x i8> @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8>* %0,
@@ -4632,7 +4632,7 @@ define <vscale x 64 x i8> @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4655,7 +4655,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16>* %0,
@@ -4677,7 +4677,7 @@ define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4700,7 +4700,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16>* %0,
@@ -4722,7 +4722,7 @@ define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4745,7 +4745,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16>* %0,
@@ -4767,7 +4767,7 @@ define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4790,7 +4790,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16>* %0,
@@ -4812,7 +4812,7 @@ define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4835,7 +4835,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16>* %0,
@@ -4857,7 +4857,7 @@ define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -4880,7 +4880,7 @@ define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16>* %0,
@@ -4902,7 +4902,7 @@ define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -4925,7 +4925,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32>* %0,
@@ -4947,7 +4947,7 @@ define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -4970,7 +4970,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32>* %0,
@@ -4992,7 +4992,7 @@ define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5015,7 +5015,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32>* %0,
@@ -5037,7 +5037,7 @@ define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5060,7 +5060,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32>* %0,
@@ -5082,7 +5082,7 @@ define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5105,7 +5105,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32>* %0,
@@ -5127,7 +5127,7 @@ define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5150,7 +5150,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64>* %0,
@@ -5172,7 +5172,7 @@ define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5195,7 +5195,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64>* %0,
@@ -5217,7 +5217,7 @@ define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5240,7 +5240,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64>* %0,
@@ -5262,7 +5262,7 @@ define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5285,7 +5285,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64>* %0,
@@ -5307,7 +5307,7 @@ define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5330,7 +5330,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
     <vscale x 1 x half>* %0,
@@ -5352,7 +5352,7 @@ define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5375,7 +5375,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
     <vscale x 2 x half>* %0,
@@ -5397,7 +5397,7 @@ define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5420,7 +5420,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
     <vscale x 4 x half>* %0,
@@ -5442,7 +5442,7 @@ define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5465,7 +5465,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
     <vscale x 8 x half>* %0,
@@ -5487,7 +5487,7 @@ define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5510,7 +5510,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
     <vscale x 16 x half>* %0,
@@ -5532,7 +5532,7 @@ define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5555,7 +5555,7 @@ define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
     <vscale x 32 x half>* %0,
@@ -5577,7 +5577,7 @@ define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5600,7 +5600,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
     <vscale x 1 x float>* %0,
@@ -5622,7 +5622,7 @@ define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5645,7 +5645,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
     <vscale x 2 x float>* %0,
@@ -5667,7 +5667,7 @@ define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5690,7 +5690,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
     <vscale x 4 x float>* %0,
@@ -5712,7 +5712,7 @@ define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5735,7 +5735,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
     <vscale x 8 x float>* %0,
@@ -5757,7 +5757,7 @@ define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5780,7 +5780,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
     <vscale x 16 x float>* %0,
@@ -5802,7 +5802,7 @@ define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5825,7 +5825,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
     <vscale x 1 x double>* %0,
@@ -5847,7 +5847,7 @@ define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -5870,7 +5870,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
     <vscale x 2 x double>* %0,
@@ -5892,7 +5892,7 @@ define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -5915,7 +5915,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
     <vscale x 4 x double>* %0,
@@ -5937,7 +5937,7 @@ define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -5960,7 +5960,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
     <vscale x 8 x double>* %0,
@@ -5982,7 +5982,7 @@ define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
index dca50df93b50..cd73c099d63c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
   <vscale x 1 x i64>*,
   i32,
@@ -11,7 +11,7 @@ define <vscale x 1 x i64> @intrinsic_vlse_v_nxv1i64_nxv1i64(<vscale x 1 x i64>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
     <vscale x 1 x i64>* %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i64> @intrinsic_vlse_v_nxv2i64_nxv2i64(<vscale x 2 x i64>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
     <vscale x 2 x i64>* %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i64> @intrinsic_vlse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i64> @intrinsic_vlse_v_nxv4i64_nxv4i64(<vscale x 4 x i64>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
     <vscale x 4 x i64>* %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i64> @intrinsic_vlse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i64> @intrinsic_vlse_v_nxv8i64_nxv8i64(<vscale x 8 x i64>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
     <vscale x 8 x i64>* %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i64> @intrinsic_vlse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -187,7 +187,7 @@ define <vscale x 1 x double> @intrinsic_vlse_v_nxv1f64_nxv1f64(<vscale x 1 x dou
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
     <vscale x 1 x double>* %0,
@@ -209,7 +209,7 @@ define <vscale x 1 x double> @intrinsic_vlse_mask_v_nxv1f64_nxv1f64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -231,7 +231,7 @@ define <vscale x 2 x double> @intrinsic_vlse_v_nxv2f64_nxv2f64(<vscale x 2 x dou
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
     <vscale x 2 x double>* %0,
@@ -253,7 +253,7 @@ define <vscale x 2 x double> @intrinsic_vlse_mask_v_nxv2f64_nxv2f64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -275,7 +275,7 @@ define <vscale x 4 x double> @intrinsic_vlse_v_nxv4f64_nxv4f64(<vscale x 4 x dou
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
     <vscale x 4 x double>* %0,
@@ -297,7 +297,7 @@ define <vscale x 4 x double> @intrinsic_vlse_mask_v_nxv4f64_nxv4f64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -319,7 +319,7 @@ define <vscale x 8 x double> @intrinsic_vlse_v_nxv8f64_nxv8f64(<vscale x 8 x dou
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
     <vscale x 8 x double>* %0,
@@ -341,7 +341,7 @@ define <vscale x 8 x double> @intrinsic_vlse_mask_v_nxv8f64_nxv8f64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -363,7 +363,7 @@ define <vscale x 1 x i32> @intrinsic_vlse_v_nxv1i32_nxv1i32(<vscale x 1 x i32>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
     <vscale x 1 x i32>* %0,
@@ -385,7 +385,7 @@ define <vscale x 1 x i32> @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -407,7 +407,7 @@ define <vscale x 2 x i32> @intrinsic_vlse_v_nxv2i32_nxv2i32(<vscale x 2 x i32>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
     <vscale x 2 x i32>* %0,
@@ -429,7 +429,7 @@ define <vscale x 2 x i32> @intrinsic_vlse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -451,7 +451,7 @@ define <vscale x 4 x i32> @intrinsic_vlse_v_nxv4i32_nxv4i32(<vscale x 4 x i32>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
     <vscale x 4 x i32>* %0,
@@ -473,7 +473,7 @@ define <vscale x 4 x i32> @intrinsic_vlse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -495,7 +495,7 @@ define <vscale x 8 x i32> @intrinsic_vlse_v_nxv8i32_nxv8i32(<vscale x 8 x i32>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
     <vscale x 8 x i32>* %0,
@@ -517,7 +517,7 @@ define <vscale x 8 x i32> @intrinsic_vlse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -539,7 +539,7 @@ define <vscale x 16 x i32> @intrinsic_vlse_v_nxv16i32_nxv16i32(<vscale x 16 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
     <vscale x 16 x i32>* %0,
@@ -561,7 +561,7 @@ define <vscale x 16 x i32> @intrinsic_vlse_mask_v_nxv16i32_nxv16i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -583,7 +583,7 @@ define <vscale x 1 x float> @intrinsic_vlse_v_nxv1f32_nxv1f32(<vscale x 1 x floa
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32(
     <vscale x 1 x float>* %0,
@@ -605,7 +605,7 @@ define <vscale x 1 x float> @intrinsic_vlse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -627,7 +627,7 @@ define <vscale x 2 x float> @intrinsic_vlse_v_nxv2f32_nxv2f32(<vscale x 2 x floa
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32(
     <vscale x 2 x float>* %0,
@@ -649,7 +649,7 @@ define <vscale x 2 x float> @intrinsic_vlse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -671,7 +671,7 @@ define <vscale x 4 x float> @intrinsic_vlse_v_nxv4f32_nxv4f32(<vscale x 4 x floa
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32(
     <vscale x 4 x float>* %0,
@@ -693,7 +693,7 @@ define <vscale x 4 x float> @intrinsic_vlse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -715,7 +715,7 @@ define <vscale x 8 x float> @intrinsic_vlse_v_nxv8f32_nxv8f32(<vscale x 8 x floa
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32(
     <vscale x 8 x float>* %0,
@@ -737,7 +737,7 @@ define <vscale x 8 x float> @intrinsic_vlse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -759,7 +759,7 @@ define <vscale x 16 x float> @intrinsic_vlse_v_nxv16f32_nxv16f32(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32(
     <vscale x 16 x float>* %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x float> @intrinsic_vlse_mask_v_nxv16f32_nxv16f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -803,7 +803,7 @@ define <vscale x 1 x i16> @intrinsic_vlse_v_nxv1i16_nxv1i16(<vscale x 1 x i16>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16(
     <vscale x 1 x i16>* %0,
@@ -825,7 +825,7 @@ define <vscale x 1 x i16> @intrinsic_vlse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -847,7 +847,7 @@ define <vscale x 2 x i16> @intrinsic_vlse_v_nxv2i16_nxv2i16(<vscale x 2 x i16>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16(
     <vscale x 2 x i16>* %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i16> @intrinsic_vlse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -891,7 +891,7 @@ define <vscale x 4 x i16> @intrinsic_vlse_v_nxv4i16_nxv4i16(<vscale x 4 x i16>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16(
     <vscale x 4 x i16>* %0,
@@ -913,7 +913,7 @@ define <vscale x 4 x i16> @intrinsic_vlse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -935,7 +935,7 @@ define <vscale x 8 x i16> @intrinsic_vlse_v_nxv8i16_nxv8i16(<vscale x 8 x i16>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16(
     <vscale x 8 x i16>* %0,
@@ -957,7 +957,7 @@ define <vscale x 8 x i16> @intrinsic_vlse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -979,7 +979,7 @@ define <vscale x 16 x i16> @intrinsic_vlse_v_nxv16i16_nxv16i16(<vscale x 16 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16(
     <vscale x 16 x i16>* %0,
@@ -1001,7 +1001,7 @@ define <vscale x 16 x i16> @intrinsic_vlse_mask_v_nxv16i16_nxv16i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1023,7 +1023,7 @@ define <vscale x 32 x i16> @intrinsic_vlse_v_nxv32i16_nxv32i16(<vscale x 32 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
     <vscale x 32 x i16>* %0,
@@ -1045,7 +1045,7 @@ define <vscale x 32 x i16> @intrinsic_vlse_mask_v_nxv32i16_nxv32i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1067,7 +1067,7 @@ define <vscale x 1 x half> @intrinsic_vlse_v_nxv1f16_nxv1f16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16(
     <vscale x 1 x half>* %0,
@@ -1089,7 +1089,7 @@ define <vscale x 1 x half> @intrinsic_vlse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1111,7 +1111,7 @@ define <vscale x 2 x half> @intrinsic_vlse_v_nxv2f16_nxv2f16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16(
     <vscale x 2 x half>* %0,
@@ -1133,7 +1133,7 @@ define <vscale x 2 x half> @intrinsic_vlse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1155,7 +1155,7 @@ define <vscale x 4 x half> @intrinsic_vlse_v_nxv4f16_nxv4f16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16(
     <vscale x 4 x half>* %0,
@@ -1177,7 +1177,7 @@ define <vscale x 4 x half> @intrinsic_vlse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1199,7 +1199,7 @@ define <vscale x 8 x half> @intrinsic_vlse_v_nxv8f16_nxv8f16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16(
     <vscale x 8 x half>* %0,
@@ -1221,7 +1221,7 @@ define <vscale x 8 x half> @intrinsic_vlse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1243,7 +1243,7 @@ define <vscale x 16 x half> @intrinsic_vlse_v_nxv16f16_nxv16f16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16(
     <vscale x 16 x half>* %0,
@@ -1265,7 +1265,7 @@ define <vscale x 16 x half> @intrinsic_vlse_mask_v_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1287,7 +1287,7 @@ define <vscale x 32 x half> @intrinsic_vlse_v_nxv32f16_nxv32f16(<vscale x 32 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16(
     <vscale x 32 x half>* %0,
@@ -1309,7 +1309,7 @@ define <vscale x 32 x half> @intrinsic_vlse_mask_v_nxv32f16_nxv32f16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1331,7 +1331,7 @@ define <vscale x 1 x i8> @intrinsic_vlse_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
     <vscale x 1 x i8>* %0,
@@ -1353,7 +1353,7 @@ define <vscale x 1 x i8> @intrinsic_vlse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1375,7 +1375,7 @@ define <vscale x 2 x i8> @intrinsic_vlse_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
     <vscale x 2 x i8>* %0,
@@ -1397,7 +1397,7 @@ define <vscale x 2 x i8> @intrinsic_vlse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1419,7 +1419,7 @@ define <vscale x 4 x i8> @intrinsic_vlse_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
     <vscale x 4 x i8>* %0,
@@ -1441,7 +1441,7 @@ define <vscale x 4 x i8> @intrinsic_vlse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1463,7 +1463,7 @@ define <vscale x 8 x i8> @intrinsic_vlse_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
     <vscale x 8 x i8>* %0,
@@ -1485,7 +1485,7 @@ define <vscale x 8 x i8> @intrinsic_vlse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1507,7 +1507,7 @@ define <vscale x 16 x i8> @intrinsic_vlse_v_nxv16i8_nxv16i8(<vscale x 16 x i8>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
     <vscale x 16 x i8>* %0,
@@ -1529,7 +1529,7 @@ define <vscale x 16 x i8> @intrinsic_vlse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m2,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1551,7 +1551,7 @@ define <vscale x 32 x i8> @intrinsic_vlse_v_nxv32i8_nxv32i8(<vscale x 32 x i8>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
     <vscale x 32 x i8>* %0,
@@ -1573,7 +1573,7 @@ define <vscale x 32 x i8> @intrinsic_vlse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1595,7 +1595,7 @@ define <vscale x 64 x i8> @intrinsic_vlse_v_nxv64i8_nxv64i8(<vscale x 64 x i8>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m8,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
     <vscale x 64 x i8>* %0,
@@ -1617,7 +1617,7 @@ define <vscale x 64 x i8> @intrinsic_vlse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m8,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
     <vscale x 64 x i8> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
index 180b35277276..af04c96c8d5a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
   <vscale x 1 x i64>*,
   i64,
@@ -11,7 +11,7 @@ define <vscale x 1 x i64> @intrinsic_vlse_v_nxv1i64_nxv1i64(<vscale x 1 x i64>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
     <vscale x 1 x i64>* %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i64> @intrinsic_vlse_v_nxv2i64_nxv2i64(<vscale x 2 x i64>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
     <vscale x 2 x i64>* %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i64> @intrinsic_vlse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i64> @intrinsic_vlse_v_nxv4i64_nxv4i64(<vscale x 4 x i64>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
     <vscale x 4 x i64>* %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i64> @intrinsic_vlse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i64> @intrinsic_vlse_v_nxv8i64_nxv8i64(<vscale x 8 x i64>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
     <vscale x 8 x i64>* %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i64> @intrinsic_vlse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -187,7 +187,7 @@ define <vscale x 1 x double> @intrinsic_vlse_v_nxv1f64_nxv1f64(<vscale x 1 x dou
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
     <vscale x 1 x double>* %0,
@@ -209,7 +209,7 @@ define <vscale x 1 x double> @intrinsic_vlse_mask_v_nxv1f64_nxv1f64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -231,7 +231,7 @@ define <vscale x 2 x double> @intrinsic_vlse_v_nxv2f64_nxv2f64(<vscale x 2 x dou
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
     <vscale x 2 x double>* %0,
@@ -253,7 +253,7 @@ define <vscale x 2 x double> @intrinsic_vlse_mask_v_nxv2f64_nxv2f64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -275,7 +275,7 @@ define <vscale x 4 x double> @intrinsic_vlse_v_nxv4f64_nxv4f64(<vscale x 4 x dou
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
     <vscale x 4 x double>* %0,
@@ -297,7 +297,7 @@ define <vscale x 4 x double> @intrinsic_vlse_mask_v_nxv4f64_nxv4f64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -319,7 +319,7 @@ define <vscale x 8 x double> @intrinsic_vlse_v_nxv8f64_nxv8f64(<vscale x 8 x dou
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
     <vscale x 8 x double>* %0,
@@ -341,7 +341,7 @@ define <vscale x 8 x double> @intrinsic_vlse_mask_v_nxv8f64_nxv8f64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -363,7 +363,7 @@ define <vscale x 1 x i32> @intrinsic_vlse_v_nxv1i32_nxv1i32(<vscale x 1 x i32>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
     <vscale x 1 x i32>* %0,
@@ -385,7 +385,7 @@ define <vscale x 1 x i32> @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -407,7 +407,7 @@ define <vscale x 2 x i32> @intrinsic_vlse_v_nxv2i32_nxv2i32(<vscale x 2 x i32>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
     <vscale x 2 x i32>* %0,
@@ -429,7 +429,7 @@ define <vscale x 2 x i32> @intrinsic_vlse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -451,7 +451,7 @@ define <vscale x 4 x i32> @intrinsic_vlse_v_nxv4i32_nxv4i32(<vscale x 4 x i32>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
     <vscale x 4 x i32>* %0,
@@ -473,7 +473,7 @@ define <vscale x 4 x i32> @intrinsic_vlse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -495,7 +495,7 @@ define <vscale x 8 x i32> @intrinsic_vlse_v_nxv8i32_nxv8i32(<vscale x 8 x i32>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
     <vscale x 8 x i32>* %0,
@@ -517,7 +517,7 @@ define <vscale x 8 x i32> @intrinsic_vlse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -539,7 +539,7 @@ define <vscale x 16 x i32> @intrinsic_vlse_v_nxv16i32_nxv16i32(<vscale x 16 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
     <vscale x 16 x i32>* %0,
@@ -561,7 +561,7 @@ define <vscale x 16 x i32> @intrinsic_vlse_mask_v_nxv16i32_nxv16i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -583,7 +583,7 @@ define <vscale x 1 x float> @intrinsic_vlse_v_nxv1f32_nxv1f32(<vscale x 1 x floa
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32(
     <vscale x 1 x float>* %0,
@@ -605,7 +605,7 @@ define <vscale x 1 x float> @intrinsic_vlse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -627,7 +627,7 @@ define <vscale x 2 x float> @intrinsic_vlse_v_nxv2f32_nxv2f32(<vscale x 2 x floa
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32(
     <vscale x 2 x float>* %0,
@@ -649,7 +649,7 @@ define <vscale x 2 x float> @intrinsic_vlse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -671,7 +671,7 @@ define <vscale x 4 x float> @intrinsic_vlse_v_nxv4f32_nxv4f32(<vscale x 4 x floa
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32(
     <vscale x 4 x float>* %0,
@@ -693,7 +693,7 @@ define <vscale x 4 x float> @intrinsic_vlse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -715,7 +715,7 @@ define <vscale x 8 x float> @intrinsic_vlse_v_nxv8f32_nxv8f32(<vscale x 8 x floa
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32(
     <vscale x 8 x float>* %0,
@@ -737,7 +737,7 @@ define <vscale x 8 x float> @intrinsic_vlse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -759,7 +759,7 @@ define <vscale x 16 x float> @intrinsic_vlse_v_nxv16f32_nxv16f32(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32(
     <vscale x 16 x float>* %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x float> @intrinsic_vlse_mask_v_nxv16f32_nxv16f32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -803,7 +803,7 @@ define <vscale x 1 x i16> @intrinsic_vlse_v_nxv1i16_nxv1i16(<vscale x 1 x i16>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16(
     <vscale x 1 x i16>* %0,
@@ -825,7 +825,7 @@ define <vscale x 1 x i16> @intrinsic_vlse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -847,7 +847,7 @@ define <vscale x 2 x i16> @intrinsic_vlse_v_nxv2i16_nxv2i16(<vscale x 2 x i16>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16(
     <vscale x 2 x i16>* %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i16> @intrinsic_vlse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -891,7 +891,7 @@ define <vscale x 4 x i16> @intrinsic_vlse_v_nxv4i16_nxv4i16(<vscale x 4 x i16>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16(
     <vscale x 4 x i16>* %0,
@@ -913,7 +913,7 @@ define <vscale x 4 x i16> @intrinsic_vlse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -935,7 +935,7 @@ define <vscale x 8 x i16> @intrinsic_vlse_v_nxv8i16_nxv8i16(<vscale x 8 x i16>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16(
     <vscale x 8 x i16>* %0,
@@ -957,7 +957,7 @@ define <vscale x 8 x i16> @intrinsic_vlse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -979,7 +979,7 @@ define <vscale x 16 x i16> @intrinsic_vlse_v_nxv16i16_nxv16i16(<vscale x 16 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16(
     <vscale x 16 x i16>* %0,
@@ -1001,7 +1001,7 @@ define <vscale x 16 x i16> @intrinsic_vlse_mask_v_nxv16i16_nxv16i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1023,7 +1023,7 @@ define <vscale x 32 x i16> @intrinsic_vlse_v_nxv32i16_nxv32i16(<vscale x 32 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
     <vscale x 32 x i16>* %0,
@@ -1045,7 +1045,7 @@ define <vscale x 32 x i16> @intrinsic_vlse_mask_v_nxv32i16_nxv32i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1067,7 +1067,7 @@ define <vscale x 1 x half> @intrinsic_vlse_v_nxv1f16_nxv1f16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16(
     <vscale x 1 x half>* %0,
@@ -1089,7 +1089,7 @@ define <vscale x 1 x half> @intrinsic_vlse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1111,7 +1111,7 @@ define <vscale x 2 x half> @intrinsic_vlse_v_nxv2f16_nxv2f16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16(
     <vscale x 2 x half>* %0,
@@ -1133,7 +1133,7 @@ define <vscale x 2 x half> @intrinsic_vlse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1155,7 +1155,7 @@ define <vscale x 4 x half> @intrinsic_vlse_v_nxv4f16_nxv4f16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16(
     <vscale x 4 x half>* %0,
@@ -1177,7 +1177,7 @@ define <vscale x 4 x half> @intrinsic_vlse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1199,7 +1199,7 @@ define <vscale x 8 x half> @intrinsic_vlse_v_nxv8f16_nxv8f16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16(
     <vscale x 8 x half>* %0,
@@ -1221,7 +1221,7 @@ define <vscale x 8 x half> @intrinsic_vlse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1243,7 +1243,7 @@ define <vscale x 16 x half> @intrinsic_vlse_v_nxv16f16_nxv16f16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16(
     <vscale x 16 x half>* %0,
@@ -1265,7 +1265,7 @@ define <vscale x 16 x half> @intrinsic_vlse_mask_v_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1287,7 +1287,7 @@ define <vscale x 32 x half> @intrinsic_vlse_v_nxv32f16_nxv32f16(<vscale x 32 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16(
     <vscale x 32 x half>* %0,
@@ -1309,7 +1309,7 @@ define <vscale x 32 x half> @intrinsic_vlse_mask_v_nxv32f16_nxv32f16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1331,7 +1331,7 @@ define <vscale x 1 x i8> @intrinsic_vlse_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
     <vscale x 1 x i8>* %0,
@@ -1353,7 +1353,7 @@ define <vscale x 1 x i8> @intrinsic_vlse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1375,7 +1375,7 @@ define <vscale x 2 x i8> @intrinsic_vlse_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
     <vscale x 2 x i8>* %0,
@@ -1397,7 +1397,7 @@ define <vscale x 2 x i8> @intrinsic_vlse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1419,7 +1419,7 @@ define <vscale x 4 x i8> @intrinsic_vlse_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
     <vscale x 4 x i8>* %0,
@@ -1441,7 +1441,7 @@ define <vscale x 4 x i8> @intrinsic_vlse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1463,7 +1463,7 @@ define <vscale x 8 x i8> @intrinsic_vlse_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
     <vscale x 8 x i8>* %0,
@@ -1485,7 +1485,7 @@ define <vscale x 8 x i8> @intrinsic_vlse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1507,7 +1507,7 @@ define <vscale x 16 x i8> @intrinsic_vlse_v_nxv16i8_nxv16i8(<vscale x 16 x i8>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
     <vscale x 16 x i8>* %0,
@@ -1529,7 +1529,7 @@ define <vscale x 16 x i8> @intrinsic_vlse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m2,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1551,7 +1551,7 @@ define <vscale x 32 x i8> @intrinsic_vlse_v_nxv32i8_nxv32i8(<vscale x 32 x i8>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
     <vscale x 32 x i8>* %0,
@@ -1573,7 +1573,7 @@ define <vscale x 32 x i8> @intrinsic_vlse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1595,7 +1595,7 @@ define <vscale x 64 x i8> @intrinsic_vlse_v_nxv64i8_nxv64i8(<vscale x 64 x i8>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m8,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
     <vscale x 64 x i8>* %0,
@@ -1617,7 +1617,7 @@ define <vscale x 64 x i8> @intrinsic_vlse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m8,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
     <vscale x 64 x i8> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll
index efb36487aac0..ad54666d92cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>*,
   <vscale x 1 x i64>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8>* %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8>* %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8>* %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8>* %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16>* %0,
@@ -214,7 +214,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16>* %0,
@@ -259,7 +259,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16>* %0,
@@ -304,7 +304,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -327,7 +327,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16>* %0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -372,7 +372,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32>* %0,
@@ -394,7 +394,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32>* %0,
@@ -439,7 +439,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32>* %0,
@@ -484,7 +484,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32>* %0,
@@ -529,7 +529,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -551,7 +551,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64>* %0,
@@ -573,7 +573,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -595,7 +595,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64>* %0,
@@ -617,7 +617,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -639,7 +639,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64>* %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -683,7 +683,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64>* %0,
@@ -705,7 +705,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -728,7 +728,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64(
     <vscale x 1 x half>* %0,
@@ -750,7 +750,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -773,7 +773,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64(
     <vscale x 2 x half>* %0,
@@ -795,7 +795,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -818,7 +818,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64(
     <vscale x 4 x half>* %0,
@@ -840,7 +840,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -863,7 +863,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64(
     <vscale x 8 x half>* %0,
@@ -885,7 +885,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -908,7 +908,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
     <vscale x 1 x float>* %0,
@@ -930,7 +930,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -953,7 +953,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64(
     <vscale x 2 x float>* %0,
@@ -975,7 +975,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -998,7 +998,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64(
     <vscale x 4 x float>* %0,
@@ -1020,7 +1020,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64(
     <vscale x 8 x float>* %0,
@@ -1065,7 +1065,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1087,7 +1087,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64(
     <vscale x 1 x double>* %0,
@@ -1109,7 +1109,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1131,7 +1131,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64(
     <vscale x 2 x double>* %0,
@@ -1153,7 +1153,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1175,7 +1175,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64(
     <vscale x 4 x double>* %0,
@@ -1197,7 +1197,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1219,7 +1219,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64(
     <vscale x 8 x double>* %0,
@@ -1241,7 +1241,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1264,7 +1264,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8>* %0,
@@ -1286,7 +1286,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8>* %0,
@@ -1331,7 +1331,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1354,7 +1354,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8>* %0,
@@ -1376,7 +1376,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1399,7 +1399,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8>* %0,
@@ -1421,7 +1421,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1444,7 +1444,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8>* %0,
@@ -1466,7 +1466,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16>* %0,
@@ -1511,7 +1511,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1534,7 +1534,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16>* %0,
@@ -1556,7 +1556,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16>* %0,
@@ -1601,7 +1601,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1624,7 +1624,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16>* %0,
@@ -1646,7 +1646,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1669,7 +1669,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16>* %0,
@@ -1691,7 +1691,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1713,7 +1713,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32>* %0,
@@ -1735,7 +1735,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1757,7 +1757,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32>* %0,
@@ -1779,7 +1779,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1801,7 +1801,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32>* %0,
@@ -1823,7 +1823,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1845,7 +1845,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32>* %0,
@@ -1867,7 +1867,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1889,7 +1889,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32>* %0,
@@ -1911,7 +1911,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1934,7 +1934,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64>* %0,
@@ -1956,7 +1956,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -1979,7 +1979,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64>* %0,
@@ -2001,7 +2001,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2024,7 +2024,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64>* %0,
@@ -2046,7 +2046,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2069,7 +2069,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64>* %0,
@@ -2091,7 +2091,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2114,7 +2114,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
     <vscale x 1 x half>* %0,
@@ -2136,7 +2136,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2159,7 +2159,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
     <vscale x 2 x half>* %0,
@@ -2181,7 +2181,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2204,7 +2204,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
     <vscale x 4 x half>* %0,
@@ -2226,7 +2226,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
     <vscale x 8 x half>* %0,
@@ -2271,7 +2271,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2294,7 +2294,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
     <vscale x 16 x half>* %0,
@@ -2316,7 +2316,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2338,7 +2338,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
     <vscale x 1 x float>* %0,
@@ -2360,7 +2360,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2382,7 +2382,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
     <vscale x 2 x float>* %0,
@@ -2404,7 +2404,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
     <vscale x 4 x float>* %0,
@@ -2448,7 +2448,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2470,7 +2470,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
     <vscale x 8 x float>* %0,
@@ -2492,7 +2492,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2514,7 +2514,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
     <vscale x 16 x float>* %0,
@@ -2536,7 +2536,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2559,7 +2559,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
     <vscale x 1 x double>* %0,
@@ -2581,7 +2581,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2604,7 +2604,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
     <vscale x 2 x double>* %0,
@@ -2626,7 +2626,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2649,7 +2649,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
     <vscale x 4 x double>* %0,
@@ -2671,7 +2671,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2694,7 +2694,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
     <vscale x 8 x double>* %0,
@@ -2716,7 +2716,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2739,7 +2739,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8>* %0,
@@ -2761,7 +2761,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2784,7 +2784,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8>* %0,
@@ -2806,7 +2806,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2829,7 +2829,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8>* %0,
@@ -2851,7 +2851,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2874,7 +2874,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8>* %0,
@@ -2896,7 +2896,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2919,7 +2919,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8>* %0,
@@ -2941,7 +2941,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -2964,7 +2964,7 @@ define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8>* %0,
@@ -2986,7 +2986,7 @@ define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3008,7 +3008,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16>* %0,
@@ -3030,7 +3030,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3052,7 +3052,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16>* %0,
@@ -3074,7 +3074,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3096,7 +3096,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16>* %0,
@@ -3118,7 +3118,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3140,7 +3140,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16>* %0,
@@ -3162,7 +3162,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3184,7 +3184,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16>* %0,
@@ -3206,7 +3206,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3228,7 +3228,7 @@ define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16>* %0,
@@ -3250,7 +3250,7 @@ define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3273,7 +3273,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32>* %0,
@@ -3295,7 +3295,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3318,7 +3318,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32>* %0,
@@ -3340,7 +3340,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3363,7 +3363,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32>* %0,
@@ -3385,7 +3385,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3408,7 +3408,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32>* %0,
@@ -3430,7 +3430,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3453,7 +3453,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32>* %0,
@@ -3475,7 +3475,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3498,7 +3498,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64>* %0,
@@ -3520,7 +3520,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3543,7 +3543,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64>* %0,
@@ -3565,7 +3565,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3588,7 +3588,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64>* %0,
@@ -3610,7 +3610,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3633,7 +3633,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64>* %0,
@@ -3655,7 +3655,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3677,7 +3677,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
     <vscale x 1 x half>* %0,
@@ -3699,7 +3699,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3721,7 +3721,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
     <vscale x 2 x half>* %0,
@@ -3743,7 +3743,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3765,7 +3765,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
     <vscale x 4 x half>* %0,
@@ -3787,7 +3787,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3809,7 +3809,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
     <vscale x 8 x half>* %0,
@@ -3831,7 +3831,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3853,7 +3853,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
     <vscale x 16 x half>* %0,
@@ -3875,7 +3875,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3897,7 +3897,7 @@ define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
     <vscale x 32 x half>* %0,
@@ -3919,7 +3919,7 @@ define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -3942,7 +3942,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
     <vscale x 1 x float>* %0,
@@ -3964,7 +3964,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -3987,7 +3987,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
     <vscale x 2 x float>* %0,
@@ -4009,7 +4009,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4032,7 +4032,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
     <vscale x 4 x float>* %0,
@@ -4054,7 +4054,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4077,7 +4077,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
     <vscale x 8 x float>* %0,
@@ -4099,7 +4099,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4122,7 +4122,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(<vsc
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
     <vscale x 16 x float>* %0,
@@ -4144,7 +4144,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4167,7 +4167,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
     <vscale x 1 x double>* %0,
@@ -4189,7 +4189,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4212,7 +4212,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
     <vscale x 2 x double>* %0,
@@ -4234,7 +4234,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4257,7 +4257,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
     <vscale x 4 x double>* %0,
@@ -4279,7 +4279,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4302,7 +4302,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
     <vscale x 8 x double>* %0,
@@ -4324,7 +4324,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4346,7 +4346,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8>* %0,
@@ -4368,7 +4368,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4390,7 +4390,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8>* %0,
@@ -4412,7 +4412,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4434,7 +4434,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8>* %0,
@@ -4456,7 +4456,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4478,7 +4478,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8>* %0,
@@ -4500,7 +4500,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4522,7 +4522,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8>* %0,
@@ -4544,7 +4544,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4566,7 +4566,7 @@ define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8>* %0,
@@ -4588,7 +4588,7 @@ define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4610,7 +4610,7 @@ define <vscale x 64 x i8> @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8>* %0,
@@ -4632,7 +4632,7 @@ define <vscale x 64 x i8> @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4655,7 +4655,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16>* %0,
@@ -4677,7 +4677,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4700,7 +4700,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16>* %0,
@@ -4722,7 +4722,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4745,7 +4745,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16>* %0,
@@ -4767,7 +4767,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4790,7 +4790,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16>* %0,
@@ -4812,7 +4812,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4835,7 +4835,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16>* %0,
@@ -4857,7 +4857,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -4880,7 +4880,7 @@ define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16>* %0,
@@ -4902,7 +4902,7 @@ define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -4925,7 +4925,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32>* %0,
@@ -4947,7 +4947,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -4970,7 +4970,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32>* %0,
@@ -4992,7 +4992,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5015,7 +5015,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32>* %0,
@@ -5037,7 +5037,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5060,7 +5060,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32>* %0,
@@ -5082,7 +5082,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5105,7 +5105,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32>* %0,
@@ -5127,7 +5127,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5150,7 +5150,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64>* %0,
@@ -5172,7 +5172,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5195,7 +5195,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64>* %0,
@@ -5217,7 +5217,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5240,7 +5240,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64>* %0,
@@ -5262,7 +5262,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5285,7 +5285,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64>* %0,
@@ -5307,7 +5307,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5330,7 +5330,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
     <vscale x 1 x half>* %0,
@@ -5352,7 +5352,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5375,7 +5375,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
     <vscale x 2 x half>* %0,
@@ -5397,7 +5397,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5420,7 +5420,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
     <vscale x 4 x half>* %0,
@@ -5442,7 +5442,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5465,7 +5465,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
     <vscale x 8 x half>* %0,
@@ -5487,7 +5487,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5510,7 +5510,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
     <vscale x 16 x half>* %0,
@@ -5532,7 +5532,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5555,7 +5555,7 @@ define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
     <vscale x 32 x half>* %0,
@@ -5577,7 +5577,7 @@ define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5600,7 +5600,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
     <vscale x 1 x float>* %0,
@@ -5622,7 +5622,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5645,7 +5645,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
     <vscale x 2 x float>* %0,
@@ -5667,7 +5667,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5690,7 +5690,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
     <vscale x 4 x float>* %0,
@@ -5712,7 +5712,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5735,7 +5735,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
     <vscale x 8 x float>* %0,
@@ -5757,7 +5757,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5780,7 +5780,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
     <vscale x 16 x float>* %0,
@@ -5802,7 +5802,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5825,7 +5825,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
     <vscale x 1 x double>* %0,
@@ -5847,7 +5847,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -5870,7 +5870,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
     <vscale x 2 x double>* %0,
@@ -5892,7 +5892,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -5915,7 +5915,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
     <vscale x 4 x double>* %0,
@@ -5937,7 +5937,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -5960,7 +5960,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
     <vscale x 8 x double>* %0,
@@ -5982,7 +5982,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
index 77136f2ef1ac..af97244b9e7a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>*,
   <vscale x 1 x i64>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8>* %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8>* %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8>* %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8>* %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16>* %0,
@@ -214,7 +214,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16>* %0,
@@ -259,7 +259,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16>* %0,
@@ -304,7 +304,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -327,7 +327,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16>* %0,
@@ -349,7 +349,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -372,7 +372,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32>* %0,
@@ -394,7 +394,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32>* %0,
@@ -439,7 +439,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32>* %0,
@@ -484,7 +484,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32>* %0,
@@ -529,7 +529,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -551,7 +551,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64>* %0,
@@ -573,7 +573,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -595,7 +595,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64>* %0,
@@ -617,7 +617,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -639,7 +639,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64>* %0,
@@ -661,7 +661,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -683,7 +683,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64>* %0,
@@ -705,7 +705,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -728,7 +728,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64(
     <vscale x 1 x half>* %0,
@@ -750,7 +750,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -773,7 +773,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64(
     <vscale x 2 x half>* %0,
@@ -795,7 +795,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -818,7 +818,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64(
     <vscale x 4 x half>* %0,
@@ -840,7 +840,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -863,7 +863,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64(
     <vscale x 8 x half>* %0,
@@ -885,7 +885,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -908,7 +908,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
     <vscale x 1 x float>* %0,
@@ -930,7 +930,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -953,7 +953,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64(
     <vscale x 2 x float>* %0,
@@ -975,7 +975,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -998,7 +998,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64(
     <vscale x 4 x float>* %0,
@@ -1020,7 +1020,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64(
     <vscale x 8 x float>* %0,
@@ -1065,7 +1065,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1087,7 +1087,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64(
     <vscale x 1 x double>* %0,
@@ -1109,7 +1109,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1131,7 +1131,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64(
     <vscale x 2 x double>* %0,
@@ -1153,7 +1153,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1175,7 +1175,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64(
     <vscale x 4 x double>* %0,
@@ -1197,7 +1197,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1219,7 +1219,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64(
     <vscale x 8 x double>* %0,
@@ -1241,7 +1241,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1264,7 +1264,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8>* %0,
@@ -1286,7 +1286,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1309,7 +1309,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8>* %0,
@@ -1331,7 +1331,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1354,7 +1354,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8>* %0,
@@ -1376,7 +1376,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1399,7 +1399,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8>* %0,
@@ -1421,7 +1421,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1444,7 +1444,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8>* %0,
@@ -1466,7 +1466,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16>* %0,
@@ -1511,7 +1511,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1534,7 +1534,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16>* %0,
@@ -1556,7 +1556,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16>* %0,
@@ -1601,7 +1601,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1624,7 +1624,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16>* %0,
@@ -1646,7 +1646,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1669,7 +1669,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16>* %0,
@@ -1691,7 +1691,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1713,7 +1713,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32>* %0,
@@ -1735,7 +1735,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1757,7 +1757,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32>* %0,
@@ -1779,7 +1779,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1801,7 +1801,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32>* %0,
@@ -1823,7 +1823,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1845,7 +1845,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32>* %0,
@@ -1867,7 +1867,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1889,7 +1889,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32>* %0,
@@ -1911,7 +1911,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1934,7 +1934,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64>* %0,
@@ -1956,7 +1956,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -1979,7 +1979,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64>* %0,
@@ -2001,7 +2001,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2024,7 +2024,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64>* %0,
@@ -2046,7 +2046,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2069,7 +2069,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64>* %0,
@@ -2091,7 +2091,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2114,7 +2114,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
     <vscale x 1 x half>* %0,
@@ -2136,7 +2136,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2159,7 +2159,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
     <vscale x 2 x half>* %0,
@@ -2181,7 +2181,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2204,7 +2204,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
     <vscale x 4 x half>* %0,
@@ -2226,7 +2226,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
     <vscale x 8 x half>* %0,
@@ -2271,7 +2271,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2294,7 +2294,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
     <vscale x 16 x half>* %0,
@@ -2316,7 +2316,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2338,7 +2338,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
     <vscale x 1 x float>* %0,
@@ -2360,7 +2360,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2382,7 +2382,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
     <vscale x 2 x float>* %0,
@@ -2404,7 +2404,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
     <vscale x 4 x float>* %0,
@@ -2448,7 +2448,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2470,7 +2470,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
     <vscale x 8 x float>* %0,
@@ -2492,7 +2492,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2514,7 +2514,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
     <vscale x 16 x float>* %0,
@@ -2536,7 +2536,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2559,7 +2559,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
     <vscale x 1 x double>* %0,
@@ -2581,7 +2581,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2604,7 +2604,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
     <vscale x 2 x double>* %0,
@@ -2626,7 +2626,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2649,7 +2649,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
     <vscale x 4 x double>* %0,
@@ -2671,7 +2671,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2694,7 +2694,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
     <vscale x 8 x double>* %0,
@@ -2716,7 +2716,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2739,7 +2739,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8>* %0,
@@ -2761,7 +2761,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2784,7 +2784,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8>* %0,
@@ -2806,7 +2806,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2829,7 +2829,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8>* %0,
@@ -2851,7 +2851,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2874,7 +2874,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8>* %0,
@@ -2896,7 +2896,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2919,7 +2919,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8>* %0,
@@ -2941,7 +2941,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -2964,7 +2964,7 @@ define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8>* %0,
@@ -2986,7 +2986,7 @@ define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3008,7 +3008,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16>* %0,
@@ -3030,7 +3030,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3052,7 +3052,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16>* %0,
@@ -3074,7 +3074,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3096,7 +3096,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16>* %0,
@@ -3118,7 +3118,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3140,7 +3140,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16>* %0,
@@ -3162,7 +3162,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3184,7 +3184,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16>* %0,
@@ -3206,7 +3206,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3228,7 +3228,7 @@ define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16>* %0,
@@ -3250,7 +3250,7 @@ define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3273,7 +3273,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32>* %0,
@@ -3295,7 +3295,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3318,7 +3318,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32>* %0,
@@ -3340,7 +3340,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3363,7 +3363,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32>* %0,
@@ -3385,7 +3385,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3408,7 +3408,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32>* %0,
@@ -3430,7 +3430,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3453,7 +3453,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32>* %0,
@@ -3475,7 +3475,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3498,7 +3498,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64>* %0,
@@ -3520,7 +3520,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3543,7 +3543,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64>* %0,
@@ -3565,7 +3565,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3588,7 +3588,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64>* %0,
@@ -3610,7 +3610,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3633,7 +3633,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64>* %0,
@@ -3655,7 +3655,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3677,7 +3677,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
     <vscale x 1 x half>* %0,
@@ -3699,7 +3699,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3721,7 +3721,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
     <vscale x 2 x half>* %0,
@@ -3743,7 +3743,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3765,7 +3765,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
     <vscale x 4 x half>* %0,
@@ -3787,7 +3787,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3809,7 +3809,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
     <vscale x 8 x half>* %0,
@@ -3831,7 +3831,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3853,7 +3853,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
     <vscale x 16 x half>* %0,
@@ -3875,7 +3875,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3897,7 +3897,7 @@ define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
     <vscale x 32 x half>* %0,
@@ -3919,7 +3919,7 @@ define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -3942,7 +3942,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
     <vscale x 1 x float>* %0,
@@ -3964,7 +3964,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -3987,7 +3987,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
     <vscale x 2 x float>* %0,
@@ -4009,7 +4009,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4032,7 +4032,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
     <vscale x 4 x float>* %0,
@@ -4054,7 +4054,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4077,7 +4077,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
     <vscale x 8 x float>* %0,
@@ -4099,7 +4099,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4122,7 +4122,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(<vsc
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
     <vscale x 16 x float>* %0,
@@ -4144,7 +4144,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4167,7 +4167,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
     <vscale x 1 x double>* %0,
@@ -4189,7 +4189,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4212,7 +4212,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
     <vscale x 2 x double>* %0,
@@ -4234,7 +4234,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4257,7 +4257,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
     <vscale x 4 x double>* %0,
@@ -4279,7 +4279,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4302,7 +4302,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
     <vscale x 8 x double>* %0,
@@ -4324,7 +4324,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4346,7 +4346,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8>* %0,
@@ -4368,7 +4368,7 @@ define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4390,7 +4390,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8>* %0,
@@ -4412,7 +4412,7 @@ define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4434,7 +4434,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8>* %0,
@@ -4456,7 +4456,7 @@ define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4478,7 +4478,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8>* %0,
@@ -4500,7 +4500,7 @@ define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4522,7 +4522,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8>* %0,
@@ -4544,7 +4544,7 @@ define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4566,7 +4566,7 @@ define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8>* %0,
@@ -4588,7 +4588,7 @@ define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4610,7 +4610,7 @@ define <vscale x 64 x i8> @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8>* %0,
@@ -4632,7 +4632,7 @@ define <vscale x 64 x i8> @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4655,7 +4655,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16>* %0,
@@ -4677,7 +4677,7 @@ define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4700,7 +4700,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16>* %0,
@@ -4722,7 +4722,7 @@ define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4745,7 +4745,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16>* %0,
@@ -4767,7 +4767,7 @@ define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4790,7 +4790,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16>* %0,
@@ -4812,7 +4812,7 @@ define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4835,7 +4835,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16>* %0,
@@ -4857,7 +4857,7 @@ define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -4880,7 +4880,7 @@ define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16>* %0,
@@ -4902,7 +4902,7 @@ define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -4925,7 +4925,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32>* %0,
@@ -4947,7 +4947,7 @@ define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -4970,7 +4970,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32>* %0,
@@ -4992,7 +4992,7 @@ define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5015,7 +5015,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32>* %0,
@@ -5037,7 +5037,7 @@ define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5060,7 +5060,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32>* %0,
@@ -5082,7 +5082,7 @@ define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5105,7 +5105,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32>* %0,
@@ -5127,7 +5127,7 @@ define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5150,7 +5150,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64>* %0,
@@ -5172,7 +5172,7 @@ define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5195,7 +5195,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64>* %0,
@@ -5217,7 +5217,7 @@ define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5240,7 +5240,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64>* %0,
@@ -5262,7 +5262,7 @@ define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5285,7 +5285,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64>* %0,
@@ -5307,7 +5307,7 @@ define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5330,7 +5330,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
     <vscale x 1 x half>* %0,
@@ -5352,7 +5352,7 @@ define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5375,7 +5375,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
     <vscale x 2 x half>* %0,
@@ -5397,7 +5397,7 @@ define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5420,7 +5420,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
     <vscale x 4 x half>* %0,
@@ -5442,7 +5442,7 @@ define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5465,7 +5465,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
     <vscale x 8 x half>* %0,
@@ -5487,7 +5487,7 @@ define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5510,7 +5510,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
     <vscale x 16 x half>* %0,
@@ -5532,7 +5532,7 @@ define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5555,7 +5555,7 @@ define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
     <vscale x 32 x half>* %0,
@@ -5577,7 +5577,7 @@ define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5600,7 +5600,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
     <vscale x 1 x float>* %0,
@@ -5622,7 +5622,7 @@ define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5645,7 +5645,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
     <vscale x 2 x float>* %0,
@@ -5667,7 +5667,7 @@ define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5690,7 +5690,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
     <vscale x 4 x float>* %0,
@@ -5712,7 +5712,7 @@ define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5735,7 +5735,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
     <vscale x 8 x float>* %0,
@@ -5757,7 +5757,7 @@ define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5780,7 +5780,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
     <vscale x 16 x float>* %0,
@@ -5802,7 +5802,7 @@ define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5825,7 +5825,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
     <vscale x 1 x double>* %0,
@@ -5847,7 +5847,7 @@ define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -5870,7 +5870,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
     <vscale x 2 x double>* %0,
@@ -5892,7 +5892,7 @@ define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -5915,7 +5915,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
     <vscale x 4 x double>* %0,
@@ -5937,7 +5937,7 @@ define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -5960,7 +5960,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
     <vscale x 8 x double>* %0,
@@ -5982,7 +5982,7 @@ define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
index 490f744ef59b..7e977783d792 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8>  @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i8>  @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x i8>  @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x i8>  @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i8>  @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i8>  @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8>  @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8>  @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x i8>  @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x i8>  @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x i8>  @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x i8>  @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x i16>  @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x i16>  @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i16>  @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i16>  @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16>  @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16>  @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x i16>  @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x i16>  @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x i16>  @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i16>  @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x i32>  @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x i32>  @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32>  @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32>  @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i32>  @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x i32>  @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x i32>  @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x i32>  @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i64>  @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i64>  @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i64>  @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i64>  @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -794,7 +794,7 @@ define <vscale x 4 x i64>  @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 4 x i64>  @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i8>  @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i8> @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -886,7 +886,7 @@ define <vscale x 2 x i8>  @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -909,7 +909,7 @@ define <vscale x 2 x i8> @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -932,7 +932,7 @@ define <vscale x 4 x i8>  @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@ define <vscale x 4 x i8> @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -978,7 +978,7 @@ define <vscale x 8 x i8>  @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 8 x i8> @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 16 x i8>  @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 16 x i8> @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 32 x i8>  @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 32 x i8> @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1116,7 +1116,7 @@ define <vscale x 1 x i16>  @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1139,7 +1139,7 @@ define <vscale x 1 x i16> @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1162,7 +1162,7 @@ define <vscale x 2 x i16>  @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 2 x i16> @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 4 x i16>  @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 4 x i16> @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 8 x i16>  @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 8 x i16> @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1300,7 +1300,7 @@ define <vscale x 16 x i16>  @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 16 x i16> @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1346,7 +1346,7 @@ define <vscale x 1 x i32>  @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1369,7 +1369,7 @@ define <vscale x 1 x i32> @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1392,7 +1392,7 @@ define <vscale x 2 x i32>  @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1415,7 +1415,7 @@ define <vscale x 2 x i32> @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i32>  @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1461,7 +1461,7 @@ define <vscale x 4 x i32> @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1484,7 +1484,7 @@ define <vscale x 8 x i32>  @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1507,7 +1507,7 @@ define <vscale x 8 x i32> @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1537,7 +1537,7 @@ define <vscale x 1 x i64>  @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v25, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1567,7 +1567,7 @@ define <vscale x 1 x i64> @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v25, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1597,7 +1597,7 @@ define <vscale x 2 x i64>  @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v26, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1627,7 +1627,7 @@ define <vscale x 2 x i64> @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v26, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1657,7 +1657,7 @@ define <vscale x 4 x i64>  @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v28, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 4 x i64> @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v28, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
index 46839d38b0cf..b82c66797c44 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8>  @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i8>  @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x i8>  @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x i8>  @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i8>  @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i8>  @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8>  @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8>  @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x i8>  @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x i8>  @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x i8>  @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x i8>  @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x i16>  @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x i16>  @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i16>  @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i16>  @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16>  @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16>  @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x i16>  @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x i16>  @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x i16>  @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i16>  @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x i32>  @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x i32>  @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32>  @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32>  @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i32>  @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x i32>  @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x i32>  @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x i32>  @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i64>  @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i64>  @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i64>  @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i64>  @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -794,7 +794,7 @@ define <vscale x 4 x i64>  @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 4 x i64>  @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i8>  @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i8> @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -886,7 +886,7 @@ define <vscale x 2 x i8>  @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -909,7 +909,7 @@ define <vscale x 2 x i8> @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -932,7 +932,7 @@ define <vscale x 4 x i8>  @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@ define <vscale x 4 x i8> @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -978,7 +978,7 @@ define <vscale x 8 x i8>  @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 8 x i8> @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 16 x i8>  @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 16 x i8> @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 32 x i8>  @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 32 x i8> @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1116,7 +1116,7 @@ define <vscale x 1 x i16>  @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1139,7 +1139,7 @@ define <vscale x 1 x i16> @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1162,7 +1162,7 @@ define <vscale x 2 x i16>  @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 2 x i16> @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 4 x i16>  @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 4 x i16> @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 8 x i16>  @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 8 x i16> @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1300,7 +1300,7 @@ define <vscale x 16 x i16>  @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 16 x i16> @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1346,7 +1346,7 @@ define <vscale x 1 x i32>  @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1369,7 +1369,7 @@ define <vscale x 1 x i32> @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1392,7 +1392,7 @@ define <vscale x 2 x i32>  @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1415,7 +1415,7 @@ define <vscale x 2 x i32> @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i32>  @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1461,7 +1461,7 @@ define <vscale x 4 x i32> @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1484,7 +1484,7 @@ define <vscale x 8 x i32>  @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1507,7 +1507,7 @@ define <vscale x 8 x i32> @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1530,7 +1530,7 @@ define <vscale x 1 x i64>  @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1553,7 +1553,7 @@ define <vscale x 1 x i64> @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1576,7 +1576,7 @@ define <vscale x 2 x i64>  @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i64> @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1622,7 +1622,7 @@ define <vscale x 4 x i64>  @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1645,7 +1645,7 @@ define <vscale x 4 x i64> @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
index 86afd6e89a60..be4ba09a31cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -151,7 +151,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -171,7 +171,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -191,7 +191,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -211,7 +211,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -231,7 +231,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -251,7 +251,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -271,7 +271,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -291,7 +291,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -331,7 +331,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -351,7 +351,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -371,7 +371,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -391,7 +391,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -411,7 +411,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -431,7 +431,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -451,7 +451,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -471,7 +471,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -491,7 +491,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -511,7 +511,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -551,7 +551,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -571,7 +571,7 @@ define <vscale x 64 x i1> @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -591,7 +591,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -611,7 +611,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -631,7 +631,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -651,7 +651,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -671,7 +671,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -691,7 +691,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -711,7 +711,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -731,7 +731,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -751,7 +751,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -791,7 +791,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i6
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmadc.vv v0, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i6
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmadc.vv v0, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i6
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmadc.vv v0, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -895,7 +895,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i6
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -910,7 +910,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -925,7 +925,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -940,7 +940,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -970,7 +970,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -985,7 +985,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1000,7 +1000,7 @@ define <vscale x 64 x i1> @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1015,7 +1015,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1030,7 +1030,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16(<vscale x 2 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1045,7 +1045,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1060,7 +1060,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16(<vscale x 8 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1075,7 +1075,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1090,7 +1090,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1105,7 +1105,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1120,7 +1120,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32(<vscale x 2 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1150,7 +1150,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32(<vscale x 8 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1165,7 +1165,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1180,7 +1180,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64(<vscale x 1 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64(<vscale x 2 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1210,7 +1210,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64(<vscale x 4 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64(<vscale x 8 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
index 2a4d8a8ecf96..a9fd51b7c19d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -151,7 +151,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -171,7 +171,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -191,7 +191,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -211,7 +211,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -231,7 +231,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -251,7 +251,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -271,7 +271,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -291,7 +291,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -331,7 +331,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -351,7 +351,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -371,7 +371,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -391,7 +391,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -411,7 +411,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -431,7 +431,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -451,7 +451,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -471,7 +471,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -491,7 +491,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -511,7 +511,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -551,7 +551,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -571,7 +571,7 @@ define <vscale x 64 x i1> @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -591,7 +591,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -611,7 +611,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -631,7 +631,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -651,7 +651,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -671,7 +671,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -691,7 +691,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -711,7 +711,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -731,7 +731,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -751,7 +751,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -791,7 +791,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -811,7 +811,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -831,7 +831,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -851,7 +851,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -871,7 +871,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -901,7 +901,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -931,7 +931,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -946,7 +946,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -961,7 +961,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -976,7 +976,7 @@ define <vscale x 64 x i1> @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -991,7 +991,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1006,7 +1006,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16(<vscale x 2 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1021,7 +1021,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1036,7 +1036,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16(<vscale x 8 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1051,7 +1051,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1081,7 +1081,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1096,7 +1096,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32(<vscale x 2 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1111,7 +1111,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1126,7 +1126,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32(<vscale x 8 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1156,7 +1156,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64(<vscale x 1 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1171,7 +1171,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64(<vscale x 2 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1186,7 +1186,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64(<vscale x 4 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1201,7 +1201,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64(<vscale x 8 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
index 0186e0ce8aa8..cbd6e607153e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -36,7 +36,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -59,7 +59,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -82,7 +82,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -105,7 +105,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8(
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -128,7 +128,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8(
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -151,7 +151,7 @@ define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8(
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -174,7 +174,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -197,7 +197,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -220,7 +220,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -243,7 +243,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -266,7 +266,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -289,7 +289,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -312,7 +312,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -335,7 +335,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -358,7 +358,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -381,7 +381,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -404,7 +404,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i3
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -427,7 +427,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -450,7 +450,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -473,7 +473,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -496,7 +496,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -542,7 +542,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -565,7 +565,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -588,7 +588,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -611,7 +611,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -634,7 +634,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -657,7 +657,7 @@ define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -680,7 +680,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -703,7 +703,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -726,7 +726,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -749,7 +749,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -772,7 +772,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16(<vs
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -795,7 +795,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16(<vs
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -818,7 +818,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -841,7 +841,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -864,7 +864,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -887,7 +887,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -910,7 +910,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32(<vs
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -939,7 +939,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64(<vscal
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -968,7 +968,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64(<vscal
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -997,7 +997,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64(<vscal
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v28, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1026,7 +1026,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64(<vscal
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1043,7 +1043,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1060,7 +1060,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1077,7 +1077,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1094,7 +1094,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1111,7 +1111,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1128,7 +1128,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1145,7 +1145,7 @@ define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1162,7 +1162,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1179,7 +1179,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1196,7 +1196,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1213,7 +1213,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1230,7 +1230,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1264,7 +1264,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1281,7 +1281,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1298,7 +1298,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1315,7 +1315,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1332,7 +1332,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1349,7 +1349,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1366,7 +1366,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1383,7 +1383,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1400,7 +1400,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
index 349efa4ce54d..a35947488f0a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -36,7 +36,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -59,7 +59,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -82,7 +82,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -105,7 +105,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8(
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -128,7 +128,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8(
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -151,7 +151,7 @@ define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8(
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -174,7 +174,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -197,7 +197,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -220,7 +220,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -243,7 +243,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -266,7 +266,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -289,7 +289,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -312,7 +312,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -335,7 +335,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -358,7 +358,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -381,7 +381,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -404,7 +404,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i3
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -427,7 +427,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -450,7 +450,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -473,7 +473,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -496,7 +496,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -542,7 +542,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -565,7 +565,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -588,7 +588,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -611,7 +611,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -634,7 +634,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -657,7 +657,7 @@ define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -680,7 +680,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -703,7 +703,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -726,7 +726,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -749,7 +749,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -772,7 +772,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16(<vs
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -795,7 +795,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16(<vs
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -818,7 +818,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -841,7 +841,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -864,7 +864,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -887,7 +887,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -910,7 +910,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32(<vs
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -933,7 +933,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -956,7 +956,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -979,7 +979,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1036,7 +1036,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1053,7 +1053,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1087,7 +1087,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1104,7 +1104,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1121,7 +1121,7 @@ define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1138,7 +1138,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1155,7 +1155,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1172,7 +1172,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1189,7 +1189,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1206,7 +1206,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1223,7 +1223,7 @@ define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1240,7 +1240,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1257,7 +1257,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1274,7 +1274,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1308,7 +1308,7 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1325,7 +1325,7 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1342,7 +1342,7 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1359,7 +1359,7 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1376,7 +1376,7 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
index 8105f155171e..d01fb2e6ff8e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8>  @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i8>  @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x i8>  @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x i8>  @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i8>  @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i8>  @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8>  @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8>  @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x i8>  @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x i8>  @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x i8>  @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x i8>  @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x i16>  @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x i16>  @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i16>  @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i16>  @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16>  @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16>  @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x i16>  @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x i16>  @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x i16>  @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i16>  @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x i32>  @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x i32>  @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32>  @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32>  @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i32>  @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x i32>  @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x i32>  @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x i32>  @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i64>  @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i64>  @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i64>  @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i64>  @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -794,7 +794,7 @@ define <vscale x 4 x i64>  @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 4 x i64>  @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i8>  @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i8> @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -886,7 +886,7 @@ define <vscale x 2 x i8>  @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -909,7 +909,7 @@ define <vscale x 2 x i8> @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -932,7 +932,7 @@ define <vscale x 4 x i8>  @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@ define <vscale x 4 x i8> @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -978,7 +978,7 @@ define <vscale x 8 x i8>  @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 8 x i8> @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 16 x i8>  @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 16 x i8> @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 32 x i8>  @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 32 x i8> @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1116,7 +1116,7 @@ define <vscale x 1 x i16>  @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1139,7 +1139,7 @@ define <vscale x 1 x i16> @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1162,7 +1162,7 @@ define <vscale x 2 x i16>  @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 2 x i16> @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 4 x i16>  @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 4 x i16> @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 8 x i16>  @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 8 x i16> @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1300,7 +1300,7 @@ define <vscale x 16 x i16>  @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 16 x i16> @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1346,7 +1346,7 @@ define <vscale x 1 x i32>  @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1369,7 +1369,7 @@ define <vscale x 1 x i32> @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1392,7 +1392,7 @@ define <vscale x 2 x i32>  @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1415,7 +1415,7 @@ define <vscale x 2 x i32> @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i32>  @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1461,7 +1461,7 @@ define <vscale x 4 x i32> @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1484,7 +1484,7 @@ define <vscale x 8 x i32>  @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1507,7 +1507,7 @@ define <vscale x 8 x i32> @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1537,7 +1537,7 @@ define <vscale x 1 x i64>  @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v25, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1567,7 +1567,7 @@ define <vscale x 1 x i64> @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v25, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1597,7 +1597,7 @@ define <vscale x 2 x i64>  @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v26, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1627,7 +1627,7 @@ define <vscale x 2 x i64> @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v26, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1657,7 +1657,7 @@ define <vscale x 4 x i64>  @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v28, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 4 x i64> @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v28, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
index f4529043b05a..0f0c2417e42a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8>  @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i8>  @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x i8>  @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x i8>  @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i8>  @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i8>  @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8>  @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8>  @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x i8>  @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x i8>  @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x i8>  @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x i8>  @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x i16>  @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x i16>  @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i16>  @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i16>  @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16>  @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16>  @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x i16>  @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x i16>  @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x i16>  @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i16>  @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x i32>  @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x i32>  @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32>  @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32>  @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i32>  @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x i32>  @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x i32>  @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x i32>  @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i64>  @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i64>  @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i64>  @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i64>  @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -794,7 +794,7 @@ define <vscale x 4 x i64>  @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 4 x i64>  @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i8>  @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i8> @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -886,7 +886,7 @@ define <vscale x 2 x i8>  @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -909,7 +909,7 @@ define <vscale x 2 x i8> @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -932,7 +932,7 @@ define <vscale x 4 x i8>  @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@ define <vscale x 4 x i8> @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -978,7 +978,7 @@ define <vscale x 8 x i8>  @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 8 x i8> @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 16 x i8>  @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 16 x i8> @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 32 x i8>  @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 32 x i8> @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1116,7 +1116,7 @@ define <vscale x 1 x i16>  @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1139,7 +1139,7 @@ define <vscale x 1 x i16> @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1162,7 +1162,7 @@ define <vscale x 2 x i16>  @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 2 x i16> @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 4 x i16>  @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 4 x i16> @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 8 x i16>  @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 8 x i16> @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1300,7 +1300,7 @@ define <vscale x 16 x i16>  @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 16 x i16> @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1346,7 +1346,7 @@ define <vscale x 1 x i32>  @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1369,7 +1369,7 @@ define <vscale x 1 x i32> @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1392,7 +1392,7 @@ define <vscale x 2 x i32>  @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1415,7 +1415,7 @@ define <vscale x 2 x i32> @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i32>  @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1461,7 +1461,7 @@ define <vscale x 4 x i32> @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1484,7 +1484,7 @@ define <vscale x 8 x i32>  @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1507,7 +1507,7 @@ define <vscale x 8 x i32> @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1530,7 +1530,7 @@ define <vscale x 1 x i64>  @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1553,7 +1553,7 @@ define <vscale x 1 x i64> @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1576,7 +1576,7 @@ define <vscale x 2 x i64>  @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i64> @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1622,7 +1622,7 @@ define <vscale x 4 x i64>  @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1645,7 +1645,7 @@ define <vscale x 4 x i64> @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll
index 4f93f7c667e7..c8894f33a55d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmand_mm_nxv1i1(<vscale x 1 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmand_mm_nxv2i1(<vscale x 2 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmand_mm_nxv4i1(<vscale x 4 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmand_mm_nxv8i1(<vscale x 8 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmand_mm_nxv16i1(<vscale x 16 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmand_mm_nxv32i1(<vscale x 32 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmand_mm_nxv64i1(<vscale x 64 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll
index 304606c0e20c..0ab530c8a551 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmand_mm_nxv1i1(<vscale x 1 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmand_mm_nxv2i1(<vscale x 2 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmand_mm_nxv4i1(<vscale x 4 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmand_mm_nxv8i1(<vscale x 8 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmand_mm_nxv16i1(<vscale x 16 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmand_mm_nxv32i1(<vscale x 32 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmand_mm_nxv64i1(<vscale x 64 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll
index 2d5718ccbbc7..9de2d8da9d19 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmandnot_mm_nxv1i1(<vscale x 1 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmandnot_mm_nxv2i1(<vscale x 2 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmandnot_mm_nxv4i1(<vscale x 4 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmandnot_mm_nxv8i1(<vscale x 8 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmandnot_mm_nxv16i1(<vscale x 16 x i1> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmandnot_mm_nxv32i1(<vscale x 32 x i1> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmandnot_mm_nxv64i1(<vscale x 64 x i1> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll
index e2b9ec16210a..4f000bcdb6c8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmandnot_mm_nxv1i1(<vscale x 1 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmandnot_mm_nxv2i1(<vscale x 2 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmandnot_mm_nxv4i1(<vscale x 4 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmandnot_mm_nxv8i1(<vscale x 8 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmandnot_mm_nxv16i1(<vscale x 16 x i1> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmandnot_mm_nxv32i1(<vscale x 32 x i1> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmandnot_mm_nxv64i1(<vscale x 64 x i1> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
index bcba9a926553..b920ecfe8375 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmax.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmax.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmax.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
index 792adc817130..20031221ead1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
index 9a9ca53738c0..60e3f7014cc3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
index 77a8506fab6d..5c6a106d42d0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmerge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmerge-rv32.ll
index 451dc77cf773..a58733df2ccc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmerge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmerge-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 2 x i8> @intrinsic_vmerge_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -56,7 +56,7 @@ define <vscale x 4 x i8> @intrinsic_vmerge_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -78,7 +78,7 @@ define <vscale x 8 x i8> @intrinsic_vmerge_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -100,7 +100,7 @@ define <vscale x 16 x i8> @intrinsic_vmerge_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -122,7 +122,7 @@ define <vscale x 32 x i8> @intrinsic_vmerge_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -144,7 +144,7 @@ define <vscale x 64 x i8> @intrinsic_vmerge_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -166,7 +166,7 @@ define <vscale x 1 x i16> @intrinsic_vmerge_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -188,7 +188,7 @@ define <vscale x 2 x i16> @intrinsic_vmerge_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -210,7 +210,7 @@ define <vscale x 4 x i16> @intrinsic_vmerge_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -232,7 +232,7 @@ define <vscale x 8 x i16> @intrinsic_vmerge_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -254,7 +254,7 @@ define <vscale x 16 x i16> @intrinsic_vmerge_vvm_nxv16i16_nxv16i16_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -276,7 +276,7 @@ define <vscale x 32 x i16> @intrinsic_vmerge_vvm_nxv32i16_nxv32i16_nxv32i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x i32> @intrinsic_vmerge_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x i32> @intrinsic_vmerge_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -342,7 +342,7 @@ define <vscale x 4 x i32> @intrinsic_vmerge_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -364,7 +364,7 @@ define <vscale x 8 x i32> @intrinsic_vmerge_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -386,7 +386,7 @@ define <vscale x 16 x i32> @intrinsic_vmerge_vvm_nxv16i32_nxv16i32_nxv16i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -408,7 +408,7 @@ define <vscale x 1 x i64> @intrinsic_vmerge_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -430,7 +430,7 @@ define <vscale x 2 x i64> @intrinsic_vmerge_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -452,7 +452,7 @@ define <vscale x 4 x i64> @intrinsic_vmerge_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i64> @intrinsic_vmerge_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -496,7 +496,7 @@ define <vscale x 1 x i8> @intrinsic_vmerge_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -518,7 +518,7 @@ define <vscale x 2 x i8> @intrinsic_vmerge_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -540,7 +540,7 @@ define <vscale x 4 x i8> @intrinsic_vmerge_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -562,7 +562,7 @@ define <vscale x 8 x i8> @intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -584,7 +584,7 @@ define <vscale x 16 x i8> @intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -606,7 +606,7 @@ define <vscale x 32 x i8> @intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -628,7 +628,7 @@ define <vscale x 64 x i8> @intrinsic_vmerge_vxm_nxv64i8_nxv64i8_i8(<vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -650,7 +650,7 @@ define <vscale x 1 x i16> @intrinsic_vmerge_vxm_nxv1i16_nxv1i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -672,7 +672,7 @@ define <vscale x 2 x i16> @intrinsic_vmerge_vxm_nxv2i16_nxv2i16_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -694,7 +694,7 @@ define <vscale x 4 x i16> @intrinsic_vmerge_vxm_nxv4i16_nxv4i16_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -716,7 +716,7 @@ define <vscale x 8 x i16> @intrinsic_vmerge_vxm_nxv8i16_nxv8i16_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -738,7 +738,7 @@ define <vscale x 16 x i16> @intrinsic_vmerge_vxm_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -760,7 +760,7 @@ define <vscale x 32 x i16> @intrinsic_vmerge_vxm_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -782,7 +782,7 @@ define <vscale x 1 x i32> @intrinsic_vmerge_vxm_nxv1i32_nxv1i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -804,7 +804,7 @@ define <vscale x 2 x i32> @intrinsic_vmerge_vxm_nxv2i32_nxv2i32_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -826,7 +826,7 @@ define <vscale x 4 x i32> @intrinsic_vmerge_vxm_nxv4i32_nxv4i32_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -848,7 +848,7 @@ define <vscale x 8 x i32> @intrinsic_vmerge_vxm_nxv8i32_nxv8i32_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -870,7 +870,7 @@ define <vscale x 16 x i32> @intrinsic_vmerge_vxm_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -898,7 +898,7 @@ define <vscale x 1 x i64> @intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64(<vscale x 1
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v25, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -926,7 +926,7 @@ define <vscale x 2 x i64> @intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64(<vscale x 2
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v26, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -954,7 +954,7 @@ define <vscale x 4 x i64> @intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64(<vscale x 4
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v28, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -982,7 +982,7 @@ define <vscale x 8 x i64> @intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64(<vscale x 8
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -998,7 +998,7 @@ define <vscale x 1 x i8> @intrinsic_vmerge_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1014,7 +1014,7 @@ define <vscale x 2 x i8> @intrinsic_vmerge_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1030,7 +1030,7 @@ define <vscale x 4 x i8> @intrinsic_vmerge_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1046,7 +1046,7 @@ define <vscale x 8 x i8> @intrinsic_vmerge_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1062,7 +1062,7 @@ define <vscale x 16 x i8> @intrinsic_vmerge_vim_nxv16i8_nxv16i8_i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1078,7 +1078,7 @@ define <vscale x 32 x i8> @intrinsic_vmerge_vim_nxv32i8_nxv32i8_i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1094,7 +1094,7 @@ define <vscale x 64 x i8> @intrinsic_vmerge_vim_nxv64i8_nxv64i8_i8(<vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1110,7 +1110,7 @@ define <vscale x 1 x i16> @intrinsic_vmerge_vim_nxv1i16_nxv1i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1126,7 +1126,7 @@ define <vscale x 2 x i16> @intrinsic_vmerge_vim_nxv2i16_nxv2i16_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1142,7 +1142,7 @@ define <vscale x 4 x i16> @intrinsic_vmerge_vim_nxv4i16_nxv4i16_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 8 x i16> @intrinsic_vmerge_vim_nxv8i16_nxv8i16_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1174,7 +1174,7 @@ define <vscale x 16 x i16> @intrinsic_vmerge_vim_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1190,7 +1190,7 @@ define <vscale x 32 x i16> @intrinsic_vmerge_vim_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1206,7 +1206,7 @@ define <vscale x 1 x i32> @intrinsic_vmerge_vim_nxv1i32_nxv1i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1222,7 +1222,7 @@ define <vscale x 2 x i32> @intrinsic_vmerge_vim_nxv2i32_nxv2i32_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1238,7 +1238,7 @@ define <vscale x 4 x i32> @intrinsic_vmerge_vim_nxv4i32_nxv4i32_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 8 x i32> @intrinsic_vmerge_vim_nxv8i32_nxv8i32_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1270,7 +1270,7 @@ define <vscale x 16 x i32> @intrinsic_vmerge_vim_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1286,7 +1286,7 @@ define <vscale x 1 x i64> @intrinsic_vmerge_vim_nxv1i64_nxv1i64_i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1302,7 +1302,7 @@ define <vscale x 2 x i64> @intrinsic_vmerge_vim_nxv2i64_nxv2i64_i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1318,7 +1318,7 @@ define <vscale x 4 x i64> @intrinsic_vmerge_vim_nxv4i64_nxv4i64_i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1334,7 +1334,7 @@ define <vscale x 8 x i64> @intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmerge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmerge-rv64.ll
index 172330bd9c46..d8c91a281a2d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmerge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmerge-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 2 x i8> @intrinsic_vmerge_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -56,7 +56,7 @@ define <vscale x 4 x i8> @intrinsic_vmerge_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -78,7 +78,7 @@ define <vscale x 8 x i8> @intrinsic_vmerge_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -100,7 +100,7 @@ define <vscale x 16 x i8> @intrinsic_vmerge_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -122,7 +122,7 @@ define <vscale x 32 x i8> @intrinsic_vmerge_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -144,7 +144,7 @@ define <vscale x 64 x i8> @intrinsic_vmerge_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -166,7 +166,7 @@ define <vscale x 1 x i16> @intrinsic_vmerge_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -188,7 +188,7 @@ define <vscale x 2 x i16> @intrinsic_vmerge_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -210,7 +210,7 @@ define <vscale x 4 x i16> @intrinsic_vmerge_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -232,7 +232,7 @@ define <vscale x 8 x i16> @intrinsic_vmerge_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -254,7 +254,7 @@ define <vscale x 16 x i16> @intrinsic_vmerge_vvm_nxv16i16_nxv16i16_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -276,7 +276,7 @@ define <vscale x 32 x i16> @intrinsic_vmerge_vvm_nxv32i16_nxv32i16_nxv32i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x i32> @intrinsic_vmerge_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x i32> @intrinsic_vmerge_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -342,7 +342,7 @@ define <vscale x 4 x i32> @intrinsic_vmerge_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -364,7 +364,7 @@ define <vscale x 8 x i32> @intrinsic_vmerge_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -386,7 +386,7 @@ define <vscale x 16 x i32> @intrinsic_vmerge_vvm_nxv16i32_nxv16i32_nxv16i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -408,7 +408,7 @@ define <vscale x 1 x i64> @intrinsic_vmerge_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -430,7 +430,7 @@ define <vscale x 2 x i64> @intrinsic_vmerge_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -452,7 +452,7 @@ define <vscale x 4 x i64> @intrinsic_vmerge_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i64> @intrinsic_vmerge_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -496,7 +496,7 @@ define <vscale x 1 x i8> @intrinsic_vmerge_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -518,7 +518,7 @@ define <vscale x 2 x i8> @intrinsic_vmerge_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -540,7 +540,7 @@ define <vscale x 4 x i8> @intrinsic_vmerge_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -562,7 +562,7 @@ define <vscale x 8 x i8> @intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -584,7 +584,7 @@ define <vscale x 16 x i8> @intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -606,7 +606,7 @@ define <vscale x 32 x i8> @intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -628,7 +628,7 @@ define <vscale x 64 x i8> @intrinsic_vmerge_vxm_nxv64i8_nxv64i8_i8(<vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -650,7 +650,7 @@ define <vscale x 1 x i16> @intrinsic_vmerge_vxm_nxv1i16_nxv1i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -672,7 +672,7 @@ define <vscale x 2 x i16> @intrinsic_vmerge_vxm_nxv2i16_nxv2i16_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -694,7 +694,7 @@ define <vscale x 4 x i16> @intrinsic_vmerge_vxm_nxv4i16_nxv4i16_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -716,7 +716,7 @@ define <vscale x 8 x i16> @intrinsic_vmerge_vxm_nxv8i16_nxv8i16_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -738,7 +738,7 @@ define <vscale x 16 x i16> @intrinsic_vmerge_vxm_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -760,7 +760,7 @@ define <vscale x 32 x i16> @intrinsic_vmerge_vxm_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -782,7 +782,7 @@ define <vscale x 1 x i32> @intrinsic_vmerge_vxm_nxv1i32_nxv1i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -804,7 +804,7 @@ define <vscale x 2 x i32> @intrinsic_vmerge_vxm_nxv2i32_nxv2i32_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -826,7 +826,7 @@ define <vscale x 4 x i32> @intrinsic_vmerge_vxm_nxv4i32_nxv4i32_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -848,7 +848,7 @@ define <vscale x 8 x i32> @intrinsic_vmerge_vxm_nxv8i32_nxv8i32_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -870,7 +870,7 @@ define <vscale x 16 x i32> @intrinsic_vmerge_vxm_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -892,7 +892,7 @@ define <vscale x 1 x i64> @intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -914,7 +914,7 @@ define <vscale x 2 x i64> @intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -936,7 +936,7 @@ define <vscale x 4 x i64> @intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -958,7 +958,7 @@ define <vscale x 8 x i64> @intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x i8> @intrinsic_vmerge_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -990,7 +990,7 @@ define <vscale x 2 x i8> @intrinsic_vmerge_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1006,7 +1006,7 @@ define <vscale x 4 x i8> @intrinsic_vmerge_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1022,7 +1022,7 @@ define <vscale x 8 x i8> @intrinsic_vmerge_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1038,7 +1038,7 @@ define <vscale x 16 x i8> @intrinsic_vmerge_vim_nxv16i8_nxv16i8_i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1054,7 +1054,7 @@ define <vscale x 32 x i8> @intrinsic_vmerge_vim_nxv32i8_nxv32i8_i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 64 x i8> @intrinsic_vmerge_vim_nxv64i8_nxv64i8_i8(<vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1086,7 +1086,7 @@ define <vscale x 1 x i16> @intrinsic_vmerge_vim_nxv1i16_nxv1i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1102,7 +1102,7 @@ define <vscale x 2 x i16> @intrinsic_vmerge_vim_nxv2i16_nxv2i16_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1118,7 +1118,7 @@ define <vscale x 4 x i16> @intrinsic_vmerge_vim_nxv4i16_nxv4i16_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1134,7 +1134,7 @@ define <vscale x 8 x i16> @intrinsic_vmerge_vim_nxv8i16_nxv8i16_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1150,7 +1150,7 @@ define <vscale x 16 x i16> @intrinsic_vmerge_vim_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1166,7 +1166,7 @@ define <vscale x 32 x i16> @intrinsic_vmerge_vim_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i32> @intrinsic_vmerge_vim_nxv1i32_nxv1i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1198,7 +1198,7 @@ define <vscale x 2 x i32> @intrinsic_vmerge_vim_nxv2i32_nxv2i32_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1214,7 +1214,7 @@ define <vscale x 4 x i32> @intrinsic_vmerge_vim_nxv4i32_nxv4i32_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1230,7 +1230,7 @@ define <vscale x 8 x i32> @intrinsic_vmerge_vim_nxv8i32_nxv8i32_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1246,7 +1246,7 @@ define <vscale x 16 x i32> @intrinsic_vmerge_vim_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1262,7 +1262,7 @@ define <vscale x 1 x i64> @intrinsic_vmerge_vim_nxv1i64_nxv1i64_i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1278,7 +1278,7 @@ define <vscale x 2 x i64> @intrinsic_vmerge_vim_nxv2i64_nxv2i64_i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i64> @intrinsic_vmerge_vim_nxv4i64_nxv4i64_i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1310,7 +1310,7 @@ define <vscale x 8 x i64> @intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
index f588b5743ae9..a9549a484c92 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_vv_nxv1f16_nxv1f16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_vv_nxv2f16_nxv2f16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_vv_nxv4f16_nxv4f16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_vv_nxv8f16_nxv8f16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmfeq_vv_nxv16f16_nxv16f16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_vv_nxv1f32_nxv1f32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_vv_nxv2f32_nxv2f32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_vv_nxv4f32_nxv4f32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_vv_nxv8f32_nxv8f32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_vv_nxv1f64_nxv1f64(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_vv_nxv2f64_nxv2f64(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_vv_nxv4f64_nxv4f64(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_vf_nxv1f16_f16(<vscale x 1 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_vf_nxv2f16_f16(<vscale x 2 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_vf_nxv4f16_f16(<vscale x 4 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_vf_nxv8f16_f16(<vscale x 8 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@ define <vscale x 16 x i1> @intrinsic_vmfeq_vf_nxv16f16_f16(<vscale x 16 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@ define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_vf_nxv1f32_f32(<vscale x 1 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_vf_nxv2f32_f32(<vscale x 2 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_vf_nxv4f32_f32(<vscale x 4 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_vf_nxv8f32_f32(<vscale x 8 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1081,7 +1081,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_vf_nxv1f64_f64(<vscale x 1 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1111,7 +1111,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1138,7 +1138,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_vf_nxv2f64_f64(<vscale x 2 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1168,7 +1168,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1195,7 +1195,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_vf_nxv4f64_f64(<vscale x 4 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll
index 62b625f52b84..16c169e77f0b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_vv_nxv1f16_nxv1f16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_vv_nxv2f16_nxv2f16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_vv_nxv4f16_nxv4f16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_vv_nxv8f16_nxv8f16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmfeq_vv_nxv16f16_nxv16f16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_vv_nxv1f32_nxv1f32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_vv_nxv2f32_nxv2f32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_vv_nxv4f32_nxv4f32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_vv_nxv8f32_nxv8f32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_vv_nxv1f64_nxv1f64(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_vv_nxv2f64_nxv2f64(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_vv_nxv4f64_nxv4f64(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_vf_nxv1f16_f16(<vscale x 1 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_vf_nxv2f16_f16(<vscale x 2 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_vf_nxv4f16_f16(<vscale x 4 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_vf_nxv8f16_f16(<vscale x 8 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@ define <vscale x 16 x i1> @intrinsic_vmfeq_vf_nxv16f16_f16(<vscale x 16 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@ define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_vf_nxv1f32_f32(<vscale x 1 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_vf_nxv2f32_f32(<vscale x 2 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_vf_nxv4f32_f32(<vscale x 4 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_vf_nxv8f32_f32(<vscale x 8 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1077,7 +1077,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_vf_nxv1f64_f64(<vscale x 1 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1103,7 +1103,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1126,7 +1126,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_vf_nxv2f64_f64(<vscale x 2 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1152,7 +1152,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1175,7 +1175,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_vf_nxv4f64_f64(<vscale x 4 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1201,7 +1201,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
index 2415e9237ed8..4a46b5efd4ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_vv_nxv1f16_nxv1f16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_vv_nxv2f16_nxv2f16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_vv_nxv4f16_nxv4f16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_vv_nxv8f16_nxv8f16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmfge_vv_nxv16f16_nxv16f16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_vv_nxv1f32_nxv1f32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_vv_nxv2f32_nxv2f32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_vv_nxv4f32_nxv4f32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_vv_nxv8f32_nxv8f32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_vv_nxv1f64_nxv1f64(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_vv_nxv2f64_nxv2f64(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_vv_nxv4f64_nxv4f64(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1f16_f16(<vscale x 1 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2f16_f16(<vscale x 2 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4f16_f16(<vscale x 4 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_vf_nxv8f16_f16(<vscale x 8 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@ define <vscale x 16 x i1> @intrinsic_vmfge_vf_nxv16f16_f16(<vscale x 16 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@ define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1f32_f32(<vscale x 1 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2f32_f32(<vscale x 2 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4f32_f32(<vscale x 4 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_vf_nxv8f32_f32(<vscale x 8 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1081,7 +1081,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1f64_f64(<vscale x 1 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1111,7 +1111,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1138,7 +1138,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2f64_f64(<vscale x 2 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1168,7 +1168,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1195,7 +1195,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4f64_f64(<vscale x 4 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll
index da01f1266ae1..efdb0119d28e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_vv_nxv1f16_nxv1f16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_vv_nxv2f16_nxv2f16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_vv_nxv4f16_nxv4f16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_vv_nxv8f16_nxv8f16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmfge_vv_nxv16f16_nxv16f16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_vv_nxv1f32_nxv1f32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_vv_nxv2f32_nxv2f32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_vv_nxv4f32_nxv4f32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_vv_nxv8f32_nxv8f32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_vv_nxv1f64_nxv1f64(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_vv_nxv2f64_nxv2f64(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_vv_nxv4f64_nxv4f64(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1f16_f16(<vscale x 1 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2f16_f16(<vscale x 2 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4f16_f16(<vscale x 4 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_vf_nxv8f16_f16(<vscale x 8 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@ define <vscale x 16 x i1> @intrinsic_vmfge_vf_nxv16f16_f16(<vscale x 16 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@ define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1f32_f32(<vscale x 1 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2f32_f32(<vscale x 2 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4f32_f32(<vscale x 4 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_vf_nxv8f32_f32(<vscale x 8 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1077,7 +1077,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1f64_f64(<vscale x 1 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1103,7 +1103,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1126,7 +1126,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2f64_f64(<vscale x 2 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1152,7 +1152,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1175,7 +1175,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4f64_f64(<vscale x 4 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1201,7 +1201,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
index 5d225245bbcb..e4d2a71d79aa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_vv_nxv1f16_nxv1f16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_vv_nxv2f16_nxv2f16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_vv_nxv4f16_nxv4f16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_vv_nxv8f16_nxv8f16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmfgt_vv_nxv16f16_nxv16f16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_vv_nxv1f32_nxv1f32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_vv_nxv2f32_nxv2f32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_vv_nxv4f32_nxv4f32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_vv_nxv8f32_nxv8f32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_vv_nxv1f64_nxv1f64(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_vv_nxv2f64_nxv2f64(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_vv_nxv4f64_nxv4f64(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_vf_nxv1f16_f16(<vscale x 1 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_vf_nxv2f16_f16(<vscale x 2 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_vf_nxv4f16_f16(<vscale x 4 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_vf_nxv8f16_f16(<vscale x 8 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@ define <vscale x 16 x i1> @intrinsic_vmfgt_vf_nxv16f16_f16(<vscale x 16 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@ define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_vf_nxv1f32_f32(<vscale x 1 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_vf_nxv2f32_f32(<vscale x 2 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_vf_nxv4f32_f32(<vscale x 4 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_vf_nxv8f32_f32(<vscale x 8 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1081,7 +1081,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_vf_nxv1f64_f64(<vscale x 1 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1111,7 +1111,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1138,7 +1138,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_vf_nxv2f64_f64(<vscale x 2 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1168,7 +1168,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1195,7 +1195,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_vf_nxv4f64_f64(<vscale x 4 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll
index 780640e28abb..9f53c61f1d98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_vv_nxv1f16_nxv1f16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_vv_nxv2f16_nxv2f16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_vv_nxv4f16_nxv4f16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_vv_nxv8f16_nxv8f16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmfgt_vv_nxv16f16_nxv16f16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_vv_nxv1f32_nxv1f32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_vv_nxv2f32_nxv2f32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_vv_nxv4f32_nxv4f32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_vv_nxv8f32_nxv8f32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_vv_nxv1f64_nxv1f64(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_vv_nxv2f64_nxv2f64(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_vv_nxv4f64_nxv4f64(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_vf_nxv1f16_f16(<vscale x 1 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_vf_nxv2f16_f16(<vscale x 2 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_vf_nxv4f16_f16(<vscale x 4 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_vf_nxv8f16_f16(<vscale x 8 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@ define <vscale x 16 x i1> @intrinsic_vmfgt_vf_nxv16f16_f16(<vscale x 16 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@ define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_vf_nxv1f32_f32(<vscale x 1 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_vf_nxv2f32_f32(<vscale x 2 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_vf_nxv4f32_f32(<vscale x 4 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_vf_nxv8f32_f32(<vscale x 8 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1077,7 +1077,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_vf_nxv1f64_f64(<vscale x 1 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1103,7 +1103,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1126,7 +1126,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_vf_nxv2f64_f64(<vscale x 2 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1152,7 +1152,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1175,7 +1175,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_vf_nxv4f64_f64(<vscale x 4 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1201,7 +1201,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
index 23f1ff618eb8..5c84a6bd2ccb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1f16_nxv1f16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2f16_nxv2f16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4f16_nxv4f16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_vv_nxv8f16_nxv8f16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmfle_vv_nxv16f16_nxv16f16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1f32_nxv1f32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2f32_nxv2f32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4f32_nxv4f32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_vv_nxv8f32_nxv8f32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1f64_nxv1f64(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2f64_nxv2f64(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4f64_nxv4f64(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1f16_f16(<vscale x 1 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2f16_f16(<vscale x 2 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4f16_f16(<vscale x 4 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_vf_nxv8f16_f16(<vscale x 8 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@ define <vscale x 16 x i1> @intrinsic_vmfle_vf_nxv16f16_f16(<vscale x 16 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@ define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1f32_f32(<vscale x 1 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2f32_f32(<vscale x 2 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4f32_f32(<vscale x 4 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_vf_nxv8f32_f32(<vscale x 8 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1081,7 +1081,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1f64_f64(<vscale x 1 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1111,7 +1111,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1138,7 +1138,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2f64_f64(<vscale x 2 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1168,7 +1168,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1195,7 +1195,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4f64_f64(<vscale x 4 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll
index f9568042936e..1f1adb930164 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1f16_nxv1f16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2f16_nxv2f16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4f16_nxv4f16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_vv_nxv8f16_nxv8f16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmfle_vv_nxv16f16_nxv16f16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1f32_nxv1f32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2f32_nxv2f32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4f32_nxv4f32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_vv_nxv8f32_nxv8f32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1f64_nxv1f64(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2f64_nxv2f64(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4f64_nxv4f64(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1f16_f16(<vscale x 1 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2f16_f16(<vscale x 2 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4f16_f16(<vscale x 4 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_vf_nxv8f16_f16(<vscale x 8 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@ define <vscale x 16 x i1> @intrinsic_vmfle_vf_nxv16f16_f16(<vscale x 16 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@ define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1f32_f32(<vscale x 1 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2f32_f32(<vscale x 2 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4f32_f32(<vscale x 4 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_vf_nxv8f32_f32(<vscale x 8 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1077,7 +1077,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1f64_f64(<vscale x 1 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1103,7 +1103,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1126,7 +1126,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2f64_f64(<vscale x 2 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1152,7 +1152,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1175,7 +1175,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4f64_f64(<vscale x 4 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1201,7 +1201,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
index 4bed8e42a368..36297ef15414 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_vv_nxv1f16_nxv1f16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_vv_nxv2f16_nxv2f16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_vv_nxv4f16_nxv4f16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_vv_nxv8f16_nxv8f16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmflt_vv_nxv16f16_nxv16f16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_vv_nxv1f32_nxv1f32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_vv_nxv2f32_nxv2f32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_vv_nxv4f32_nxv4f32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_vv_nxv8f32_nxv8f32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_vv_nxv1f64_nxv1f64(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_vv_nxv2f64_nxv2f64(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_vv_nxv4f64_nxv4f64(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_vf_nxv1f16_f16(<vscale x 1 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_vf_nxv2f16_f16(<vscale x 2 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_vf_nxv4f16_f16(<vscale x 4 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_vf_nxv8f16_f16(<vscale x 8 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@ define <vscale x 16 x i1> @intrinsic_vmflt_vf_nxv16f16_f16(<vscale x 16 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@ define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_vf_nxv1f32_f32(<vscale x 1 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_vf_nxv2f32_f32(<vscale x 2 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_vf_nxv4f32_f32(<vscale x 4 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_vf_nxv8f32_f32(<vscale x 8 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1081,7 +1081,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_vf_nxv1f64_f64(<vscale x 1 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1111,7 +1111,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1138,7 +1138,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_vf_nxv2f64_f64(<vscale x 2 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1168,7 +1168,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1195,7 +1195,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_vf_nxv4f64_f64(<vscale x 4 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll
index 4450de625e86..1af327539247 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_vv_nxv1f16_nxv1f16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_vv_nxv2f16_nxv2f16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_vv_nxv4f16_nxv4f16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_vv_nxv8f16_nxv8f16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmflt_vv_nxv16f16_nxv16f16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_vv_nxv1f32_nxv1f32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_vv_nxv2f32_nxv2f32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_vv_nxv4f32_nxv4f32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_vv_nxv8f32_nxv8f32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_vv_nxv1f64_nxv1f64(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_vv_nxv2f64_nxv2f64(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_vv_nxv4f64_nxv4f64(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_vf_nxv1f16_f16(<vscale x 1 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_vf_nxv2f16_f16(<vscale x 2 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_vf_nxv4f16_f16(<vscale x 4 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_vf_nxv8f16_f16(<vscale x 8 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@ define <vscale x 16 x i1> @intrinsic_vmflt_vf_nxv16f16_f16(<vscale x 16 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@ define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_vf_nxv1f32_f32(<vscale x 1 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_vf_nxv2f32_f32(<vscale x 2 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_vf_nxv4f32_f32(<vscale x 4 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_vf_nxv8f32_f32(<vscale x 8 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1077,7 +1077,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_vf_nxv1f64_f64(<vscale x 1 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1103,7 +1103,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1126,7 +1126,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_vf_nxv2f64_f64(<vscale x 2 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1152,7 +1152,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1175,7 +1175,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_vf_nxv4f64_f64(<vscale x 4 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1201,7 +1201,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
index a849c0157d61..fca61cc1e089 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f16_nxv1f16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f16_nxv2f16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f16_nxv4f16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_vv_nxv8f16_nxv8f16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmfne_vv_nxv16f16_nxv16f16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f32_nxv1f32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f32_nxv2f32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f32_nxv4f32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_vv_nxv8f32_nxv8f32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f64_nxv1f64(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f64_nxv2f64(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f64_nxv4f64(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f16_f16(<vscale x 1 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f16_f16(<vscale x 2 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f16_f16(<vscale x 4 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_vf_nxv8f16_f16(<vscale x 8 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@ define <vscale x 16 x i1> @intrinsic_vmfne_vf_nxv16f16_f16(<vscale x 16 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@ define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f32_f32(<vscale x 1 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f32_f32(<vscale x 2 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f32_f32(<vscale x 4 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_vf_nxv8f32_f32(<vscale x 8 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1081,7 +1081,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f64_f64(<vscale x 1 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1111,7 +1111,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1138,7 +1138,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f64_f64(<vscale x 2 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1168,7 +1168,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1195,7 +1195,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f64_f64(<vscale x 4 x double> %
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll
index ce840a646605..d5eb28044b93 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f16_nxv1f16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f16_nxv2f16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f16_nxv4f16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_vv_nxv8f16_nxv8f16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmfne_vv_nxv16f16_nxv16f16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f32_nxv1f32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f32_nxv2f32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f32_nxv4f32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_vv_nxv8f32_nxv8f32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f64_nxv1f64(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f64_nxv2f64(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f64_nxv4f64(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f16_f16(<vscale x 1 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f16_f16(<vscale x 2 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f16_f16(<vscale x 4 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_vf_nxv8f16_f16(<vscale x 8 x half> %0,
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@ define <vscale x 16 x i1> @intrinsic_vmfne_vf_nxv16f16_f16(<vscale x 16 x half>
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@ define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f32_f32(<vscale x 1 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f32_f32(<vscale x 2 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f32_f32(<vscale x 4 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_vf_nxv8f32_f32(<vscale x 8 x float> %0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1077,7 +1077,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f64_f64(<vscale x 1 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1103,7 +1103,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1126,7 +1126,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f64_f64(<vscale x 2 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1152,7 +1152,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1175,7 +1175,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f64_f64(<vscale x 4 x double> %
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1201,7 +1201,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
index 3124b305f0ec..580e236ae803 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmin.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmin.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmin.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
index 634b2680283e..065345d57b2b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
index 497a4c7f0037..3cca47ae78db 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vminu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vminu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vminu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
index d068f9ab1dcd..999c746531d6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll
index 69ca931e93d9..f66b99828d9f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmnand_mm_nxv1i1(<vscale x 1 x i1> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmnand_mm_nxv2i1(<vscale x 2 x i1> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmnand_mm_nxv4i1(<vscale x 4 x i1> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmnand_mm_nxv8i1(<vscale x 8 x i1> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmnand_mm_nxv16i1(<vscale x 16 x i1> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmnand_mm_nxv32i1(<vscale x 32 x i1> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmnand_mm_nxv64i1(<vscale x 64 x i1> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1(
     <vscale x 64 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll
index efdd113797d4..1da5906d530d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmnand_mm_nxv1i1(<vscale x 1 x i1> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmnand_mm_nxv2i1(<vscale x 2 x i1> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmnand_mm_nxv4i1(<vscale x 4 x i1> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmnand_mm_nxv8i1(<vscale x 8 x i1> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmnand_mm_nxv16i1(<vscale x 16 x i1> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmnand_mm_nxv32i1(<vscale x 32 x i1> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmnand_mm_nxv64i1(<vscale x 64 x i1> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1(
     <vscale x 64 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll
index 8fcbcc12ac05..026435964dcd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmnor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmnor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmnor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmnor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmnor_mm_nxv16i1(<vscale x 16 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmnor_mm_nxv32i1(<vscale x 32 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmnor_mm_nxv64i1(<vscale x 64 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll
index 585998682ed2..4187f97809a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmnor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmnor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmnor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmnor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmnor_mm_nxv16i1(<vscale x 16 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmnor_mm_nxv32i1(<vscale x 32 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmnor_mm_nxv64i1(<vscale x 64 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll
index 5e1181bc4ad4..395f99e13218 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmor_mm_nxv16i1(<vscale x 16 x i1> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmor_mm_nxv32i1(<vscale x 32 x i1> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmor_mm_nxv64i1(<vscale x 64 x i1> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll
index d7404944d517..b7ac4787530e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmor_mm_nxv16i1(<vscale x 16 x i1> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmor_mm_nxv32i1(<vscale x 32 x i1> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmor_mm_nxv64i1(<vscale x 64 x i1> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll
index 3f4f36f1c9af..07312285e61a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmornot_mm_nxv1i1(<vscale x 1 x i1> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmornot_mm_nxv2i1(<vscale x 2 x i1> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmornot_mm_nxv4i1(<vscale x 4 x i1> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmornot_mm_nxv8i1(<vscale x 8 x i1> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmornot_mm_nxv16i1(<vscale x 16 x i1> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmornot_mm_nxv32i1(<vscale x 32 x i1> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmornot_mm_nxv64i1(<vscale x 64 x i1> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll
index 338862b4f037..28337aa78cc0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmornot_mm_nxv1i1(<vscale x 1 x i1> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmornot_mm_nxv2i1(<vscale x 2 x i1> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmornot_mm_nxv4i1(<vscale x 4 x i1> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmornot_mm_nxv8i1(<vscale x 8 x i1> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmornot_mm_nxv16i1(<vscale x 16 x i1> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmornot_mm_nxv32i1(<vscale x 32 x i1> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmornot_mm_nxv64i1(<vscale x 64 x i1> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
index 2f82f93e1dca..344bcfebc979 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -151,7 +151,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -171,7 +171,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -191,7 +191,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -211,7 +211,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -231,7 +231,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -251,7 +251,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -271,7 +271,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -291,7 +291,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -331,7 +331,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -351,7 +351,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -371,7 +371,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -391,7 +391,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -411,7 +411,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -431,7 +431,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -451,7 +451,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -471,7 +471,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -491,7 +491,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -511,7 +511,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -551,7 +551,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -571,7 +571,7 @@ define <vscale x 64 x i1> @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -591,7 +591,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -611,7 +611,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -631,7 +631,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -651,7 +651,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -671,7 +671,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -691,7 +691,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -711,7 +711,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -731,7 +731,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -751,7 +751,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -791,7 +791,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i6
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i6
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i6
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -895,7 +895,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i6
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
index a1df2b2abec5..a2d3fe13b763 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -151,7 +151,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -171,7 +171,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -191,7 +191,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -211,7 +211,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -231,7 +231,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -251,7 +251,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -271,7 +271,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -291,7 +291,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -331,7 +331,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -351,7 +351,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -371,7 +371,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -391,7 +391,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -411,7 +411,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -431,7 +431,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -451,7 +451,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -471,7 +471,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -491,7 +491,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -511,7 +511,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -551,7 +551,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -571,7 +571,7 @@ define <vscale x 64 x i1> @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -591,7 +591,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -611,7 +611,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -631,7 +631,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -651,7 +651,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -671,7 +671,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -691,7 +691,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -711,7 +711,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -731,7 +731,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -751,7 +751,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -791,7 +791,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -811,7 +811,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -831,7 +831,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -851,7 +851,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -871,7 +871,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
index 98999eaa9044..bcdc9906c97b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -36,7 +36,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -59,7 +59,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -82,7 +82,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -105,7 +105,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -128,7 +128,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -151,7 +151,7 @@ define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -174,7 +174,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16(<
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -197,7 +197,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16(<
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -220,7 +220,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16(<
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -243,7 +243,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16(<
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -266,7 +266,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -289,7 +289,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -312,7 +312,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32(<
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -335,7 +335,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32(<
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -358,7 +358,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32(<
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -381,7 +381,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32(<
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -404,7 +404,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -427,7 +427,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64(<
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -450,7 +450,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64(<
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -473,7 +473,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64(<
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -496,7 +496,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64(<
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -542,7 +542,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -565,7 +565,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -588,7 +588,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -611,7 +611,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8(<vsc
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -634,7 +634,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8(<vsc
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -657,7 +657,7 @@ define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8(<vsc
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -680,7 +680,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -703,7 +703,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -726,7 +726,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -749,7 +749,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -772,7 +772,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16(<v
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -795,7 +795,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16(<v
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -818,7 +818,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -841,7 +841,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -864,7 +864,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -887,7 +887,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -910,7 +910,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32(<v
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -939,7 +939,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64(<vsca
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -968,7 +968,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64(<vsca
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -997,7 +997,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64(<vsca
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v28, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1026,7 +1026,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64(<vsca
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
index 96a98b8f430a..dd8ce7465dcc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -13,7 +13,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -36,7 +36,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -59,7 +59,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -82,7 +82,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -105,7 +105,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -128,7 +128,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -151,7 +151,7 @@ define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -174,7 +174,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16(<
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -197,7 +197,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16(<
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -220,7 +220,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16(<
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -243,7 +243,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16(<
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -266,7 +266,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -289,7 +289,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -312,7 +312,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32(<
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -335,7 +335,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32(<
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -358,7 +358,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32(<
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -381,7 +381,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32(<
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -404,7 +404,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -427,7 +427,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64(<
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -450,7 +450,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64(<
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -473,7 +473,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64(<
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -496,7 +496,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64(<
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -519,7 +519,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -542,7 +542,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -565,7 +565,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -588,7 +588,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -611,7 +611,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8(<vsc
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -634,7 +634,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8(<vsc
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -657,7 +657,7 @@ define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8(<vsc
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -680,7 +680,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -703,7 +703,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -726,7 +726,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -749,7 +749,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -772,7 +772,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16(<v
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -795,7 +795,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16(<v
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -818,7 +818,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -841,7 +841,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -864,7 +864,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -887,7 +887,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -910,7 +910,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32(<v
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -933,7 +933,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -956,7 +956,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -979,7 +979,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
index 5f290d4f0fff..1f320ab23c76 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
   <vscale x 1 x i1>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbf_m_nxv1i1(<vscale x 1 x i1> %0, i32 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -53,7 +53,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbf_m_nxv2i1(<vscale x 2 x i1> %0, i32 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -75,7 +75,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -95,7 +95,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbf_m_nxv4i1(<vscale x 4 x i1> %0, i32 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -117,7 +117,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -137,7 +137,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbf_m_nxv8i1(<vscale x 8 x i1> %0, i32 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -159,7 +159,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -179,7 +179,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbf_m_nxv16i1(<vscale x 16 x i1> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -201,7 +201,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -221,7 +221,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbf_m_nxv32i1(<vscale x 32 x i1> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -243,7 +243,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -263,7 +263,7 @@ define <vscale x 64 x i1> @intrinsic_vmsbf_m_nxv64i1(<vscale x 64 x i1> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -285,7 +285,7 @@ define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
index fc3ffa57d17e..a057b90066fd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
   <vscale x 1 x i1>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbf_m_nxv1i1(<vscale x 1 x i1> %0, i64 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -53,7 +53,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbf_m_nxv2i1(<vscale x 2 x i1> %0, i64 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -75,7 +75,7 @@ define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -95,7 +95,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbf_m_nxv4i1(<vscale x 4 x i1> %0, i64 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -117,7 +117,7 @@ define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -137,7 +137,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbf_m_nxv8i1(<vscale x 8 x i1> %0, i64 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -159,7 +159,7 @@ define <vscale x 8 x i1> @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -179,7 +179,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbf_m_nxv16i1(<vscale x 16 x i1> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -201,7 +201,7 @@ define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -221,7 +221,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbf_m_nxv32i1(<vscale x 32 x i1> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -243,7 +243,7 @@ define <vscale x 32 x i1> @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -263,7 +263,7 @@ define <vscale x 64 x i1> @intrinsic_vmsbf_m_nxv64i1(<vscale x 64 x i1> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -285,7 +285,7 @@ define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
index 5e1f4d1e8b14..7ac570af0183 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmseq_vv_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 32 x i1> @intrinsic_vmseq_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@ define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1658,7 +1658,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmseq.vv v0, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1690,7 +1690,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmseq.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1718,7 +1718,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmseq.vv v0, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1750,7 +1750,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmseq.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1778,7 +1778,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmseq.vv v0, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmseq.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1827,7 +1827,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1845,7 +1845,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1862,7 +1862,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1880,7 +1880,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1897,7 +1897,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1915,7 +1915,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1932,7 +1932,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1950,7 +1950,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1967,7 +1967,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1985,7 +1985,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2002,7 +2002,7 @@ define <vscale x 32 x i1> @intrinsic_vmseq_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2020,7 +2020,7 @@ define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2037,7 +2037,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2055,7 +2055,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2072,7 +2072,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2090,7 +2090,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2107,7 +2107,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2125,7 +2125,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2142,7 +2142,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2160,7 +2160,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2177,7 +2177,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2195,7 +2195,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2212,7 +2212,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2230,7 +2230,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2247,7 +2247,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2265,7 +2265,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2282,7 +2282,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2300,7 +2300,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2317,7 +2317,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2335,7 +2335,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2352,7 +2352,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2370,7 +2370,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2387,7 +2387,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2405,7 +2405,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2422,7 +2422,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2440,7 +2440,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
index 92bd7ed080f6..8a1b9b8ba872 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmseq_vv_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 32 x i1> @intrinsic_vmseq_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@ define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1652,7 +1652,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1677,7 +1677,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1699,7 +1699,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1724,7 +1724,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1746,7 +1746,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1771,7 +1771,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1788,7 +1788,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1806,7 +1806,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1823,7 +1823,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1858,7 +1858,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1876,7 +1876,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1893,7 +1893,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1911,7 +1911,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1928,7 +1928,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1963,7 +1963,7 @@ define <vscale x 32 x i1> @intrinsic_vmseq_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1998,7 +1998,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2016,7 +2016,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2033,7 +2033,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2051,7 +2051,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2068,7 +2068,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2086,7 +2086,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2103,7 +2103,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2156,7 +2156,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2173,7 +2173,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2191,7 +2191,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2208,7 +2208,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2226,7 +2226,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2243,7 +2243,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2261,7 +2261,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2278,7 +2278,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2331,7 +2331,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2348,7 +2348,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2366,7 +2366,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2383,7 +2383,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2401,7 +2401,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
index be2e83d4a4d3..560fa18466af 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_vv_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -948,7 +948,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1023,7 +1023,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1046,7 +1046,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1072,7 +1072,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1095,7 +1095,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1121,7 +1121,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1144,7 +1144,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1170,7 +1170,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1193,7 +1193,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1219,7 +1219,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1242,7 +1242,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1268,7 +1268,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1340,7 +1340,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1366,7 +1366,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1389,7 +1389,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1415,7 +1415,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1487,7 +1487,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1513,7 +1513,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1562,7 +1562,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1585,7 +1585,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1611,7 +1611,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1634,7 +1634,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1660,7 +1660,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1688,7 +1688,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmsle.vv v0, v25, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1720,7 +1720,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmsle.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1748,7 +1748,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmsle.vv v0, v26, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1780,7 +1780,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmsle.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1808,7 +1808,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmsle.vv v0, v28, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1840,7 +1840,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmsle.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1857,7 +1857,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1875,7 +1875,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1892,7 +1892,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1910,7 +1910,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1927,7 +1927,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1945,7 +1945,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1962,7 +1962,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1980,7 +1980,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1997,7 +1997,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2015,7 +2015,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2032,7 +2032,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -6
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2050,7 +2050,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2067,7 +2067,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -4
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2085,7 +2085,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2102,7 +2102,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -2
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2120,7 +2120,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -1, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2137,7 +2137,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2155,7 +2155,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2172,7 +2172,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2190,7 +2190,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2207,7 +2207,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 3
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2225,7 +2225,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2242,7 +2242,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 5
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2260,7 +2260,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2277,7 +2277,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 7
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2295,7 +2295,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2312,7 +2312,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2347,7 +2347,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2365,7 +2365,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2382,7 +2382,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2400,7 +2400,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2417,7 +2417,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2435,7 +2435,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2452,7 +2452,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2470,7 +2470,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -2489,7 +2489,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -2507,7 +2507,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -2525,7 +2525,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -2543,7 +2543,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -2561,7 +2561,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2579,7 +2579,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2597,7 +2597,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2615,7 +2615,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2651,7 +2651,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2669,7 +2669,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2687,7 +2687,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2705,7 +2705,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2723,7 +2723,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2741,7 +2741,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2767,7 +2767,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64(<vscale
 ; CHECK-NEXT:    vmsle.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2793,7 +2793,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64(<vscale
 ; CHECK-NEXT:    vmsle.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2819,7 +2819,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64(<vscale
 ; CHECK-NEXT:    vmsle.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
index a8cf4a086935..0b16145c5753 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_vv_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -948,7 +948,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1023,7 +1023,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1046,7 +1046,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1072,7 +1072,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1095,7 +1095,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1121,7 +1121,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1144,7 +1144,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1170,7 +1170,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1193,7 +1193,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1219,7 +1219,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1242,7 +1242,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1268,7 +1268,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1340,7 +1340,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1366,7 +1366,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1389,7 +1389,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1415,7 +1415,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1487,7 +1487,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1513,7 +1513,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1562,7 +1562,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1585,7 +1585,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1611,7 +1611,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1634,7 +1634,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1660,7 +1660,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1683,7 +1683,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1732,7 +1732,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1758,7 +1758,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1807,7 +1807,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1824,7 +1824,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1842,7 +1842,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1859,7 +1859,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1877,7 +1877,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1894,7 +1894,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1912,7 +1912,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1947,7 +1947,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1964,7 +1964,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1982,7 +1982,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1999,7 +1999,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -6
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2017,7 +2017,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2034,7 +2034,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -4
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2052,7 +2052,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2069,7 +2069,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -2
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2087,7 +2087,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -1, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2104,7 +2104,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2122,7 +2122,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2139,7 +2139,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2157,7 +2157,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2174,7 +2174,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 3
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2192,7 +2192,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2209,7 +2209,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 5
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2227,7 +2227,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2244,7 +2244,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 7
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2262,7 +2262,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2279,7 +2279,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2297,7 +2297,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2314,7 +2314,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2332,7 +2332,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2349,7 +2349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 13
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2367,7 +2367,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2384,7 +2384,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2402,7 +2402,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2419,7 +2419,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2437,7 +2437,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -2456,7 +2456,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -2474,7 +2474,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -2492,7 +2492,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -2510,7 +2510,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -2528,7 +2528,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2546,7 +2546,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2564,7 +2564,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2582,7 +2582,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2600,7 +2600,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2636,7 +2636,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2654,7 +2654,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2672,7 +2672,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2690,7 +2690,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2708,7 +2708,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2726,7 +2726,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2744,7 +2744,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2762,7 +2762,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
index c0161495f932..cb1d8201b8a6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -948,7 +948,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1023,7 +1023,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1046,7 +1046,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1072,7 +1072,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1095,7 +1095,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1121,7 +1121,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1144,7 +1144,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1170,7 +1170,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1193,7 +1193,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1219,7 +1219,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1242,7 +1242,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1268,7 +1268,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1340,7 +1340,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1366,7 +1366,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1389,7 +1389,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1415,7 +1415,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i16_i16(<vscale x 16 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1487,7 +1487,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1513,7 +1513,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1562,7 +1562,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1585,7 +1585,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1611,7 +1611,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1634,7 +1634,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1660,7 +1660,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1688,7 +1688,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmsleu.vv v0, v25, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1720,7 +1720,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmsleu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1748,7 +1748,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmsleu.vv v0, v26, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1780,7 +1780,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmsleu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1808,7 +1808,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmsleu.vv v0, v28, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1840,7 +1840,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmsleu.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1857,7 +1857,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1875,7 +1875,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1892,7 +1892,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1910,7 +1910,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1927,7 +1927,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1945,7 +1945,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1962,7 +1962,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1980,7 +1980,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1997,7 +1997,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2015,7 +2015,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2032,7 +2032,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -6
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2050,7 +2050,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2067,7 +2067,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -4
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2085,7 +2085,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2102,7 +2102,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -2
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2120,7 +2120,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vv v25, v8, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2137,7 +2137,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2155,7 +2155,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2172,7 +2172,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2190,7 +2190,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2207,7 +2207,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i16_i16(<vscale x 16 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 3
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2225,7 +2225,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2242,7 +2242,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2260,7 +2260,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2277,7 +2277,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 7
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2295,7 +2295,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2312,7 +2312,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2347,7 +2347,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2365,7 +2365,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2382,7 +2382,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 13
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2400,7 +2400,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2417,7 +2417,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2435,7 +2435,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2452,7 +2452,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2470,7 +2470,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -2489,7 +2489,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -2507,7 +2507,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -2525,7 +2525,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -2543,7 +2543,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -2561,7 +2561,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2579,7 +2579,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2597,7 +2597,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2615,7 +2615,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2651,7 +2651,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2669,7 +2669,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2687,7 +2687,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2705,7 +2705,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2723,7 +2723,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2741,7 +2741,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2767,7 +2767,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64(<vscale
 ; CHECK-NEXT:    vmsleu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2793,7 +2793,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64(<vscale
 ; CHECK-NEXT:    vmsleu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2819,7 +2819,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64(<vscale
 ; CHECK-NEXT:    vmsleu.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
index 31eaf882dc70..0fb94bd9ab74 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -948,7 +948,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1023,7 +1023,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1046,7 +1046,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1072,7 +1072,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1095,7 +1095,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1121,7 +1121,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1144,7 +1144,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1170,7 +1170,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1193,7 +1193,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1219,7 +1219,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1242,7 +1242,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1268,7 +1268,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1340,7 +1340,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1366,7 +1366,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1389,7 +1389,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1415,7 +1415,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i16_i16(<vscale x 16 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1487,7 +1487,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1513,7 +1513,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1562,7 +1562,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1585,7 +1585,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1611,7 +1611,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1634,7 +1634,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1660,7 +1660,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1683,7 +1683,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1732,7 +1732,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1758,7 +1758,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1807,7 +1807,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1824,7 +1824,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1842,7 +1842,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1859,7 +1859,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1877,7 +1877,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1894,7 +1894,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1912,7 +1912,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1947,7 +1947,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1964,7 +1964,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1982,7 +1982,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1999,7 +1999,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -6
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2017,7 +2017,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2034,7 +2034,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -4
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2052,7 +2052,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2069,7 +2069,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -2
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2087,7 +2087,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vv v25, v8, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2104,7 +2104,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2122,7 +2122,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2139,7 +2139,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2157,7 +2157,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2174,7 +2174,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i16_i16(<vscale x 16 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 3
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2192,7 +2192,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2209,7 +2209,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2227,7 +2227,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2244,7 +2244,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 7
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2262,7 +2262,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2279,7 +2279,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2297,7 +2297,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2314,7 +2314,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2332,7 +2332,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2349,7 +2349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 13
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2367,7 +2367,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2384,7 +2384,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2402,7 +2402,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2419,7 +2419,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2437,7 +2437,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -2456,7 +2456,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -2474,7 +2474,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -2492,7 +2492,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -2510,7 +2510,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -2528,7 +2528,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2546,7 +2546,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2564,7 +2564,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2582,7 +2582,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2600,7 +2600,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2636,7 +2636,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16(<vsca
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2654,7 +2654,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2672,7 +2672,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2690,7 +2690,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2708,7 +2708,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2726,7 +2726,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2744,7 +2744,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2762,7 +2762,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
index 199605b838a0..5672f2689325 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_vv_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1658,7 +1658,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmslt.vv v0, v25, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1690,7 +1690,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmslt.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1718,7 +1718,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmslt.vv v0, v26, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1750,7 +1750,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmslt.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1778,7 +1778,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmslt.vv v0, v28, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmslt.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1827,7 +1827,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1845,7 +1845,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1862,7 +1862,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1880,7 +1880,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1897,7 +1897,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1915,7 +1915,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1932,7 +1932,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1950,7 +1950,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1967,7 +1967,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1985,7 +1985,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2002,7 +2002,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2020,7 +2020,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2037,7 +2037,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2055,7 +2055,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2072,7 +2072,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2090,7 +2090,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2107,7 +2107,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2125,7 +2125,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2142,7 +2142,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2160,7 +2160,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2177,7 +2177,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2195,7 +2195,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2212,7 +2212,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2230,7 +2230,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2247,7 +2247,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2265,7 +2265,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2282,7 +2282,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2300,7 +2300,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2317,7 +2317,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2335,7 +2335,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2352,7 +2352,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2370,7 +2370,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2387,7 +2387,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2405,7 +2405,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2422,7 +2422,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2440,7 +2440,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
index e03678a69557..18a0e8b9a6e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_vv_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1652,7 +1652,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1677,7 +1677,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1699,7 +1699,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1724,7 +1724,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1746,7 +1746,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1771,7 +1771,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1788,7 +1788,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1806,7 +1806,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1823,7 +1823,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1858,7 +1858,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1876,7 +1876,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1893,7 +1893,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1911,7 +1911,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1928,7 +1928,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1963,7 +1963,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1998,7 +1998,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2016,7 +2016,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2033,7 +2033,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2051,7 +2051,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2068,7 +2068,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2086,7 +2086,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2103,7 +2103,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2156,7 +2156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2173,7 +2173,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2191,7 +2191,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2208,7 +2208,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2226,7 +2226,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2243,7 +2243,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2261,7 +2261,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2278,7 +2278,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2331,7 +2331,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2348,7 +2348,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2366,7 +2366,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2383,7 +2383,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2401,7 +2401,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
index 71e499b3ca39..cb4d4d871363 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i16_i16(<vscale x 16 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1658,7 +1658,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmsltu.vv v0, v25, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1690,7 +1690,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmsltu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1718,7 +1718,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmsltu.vv v0, v26, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1750,7 +1750,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmsltu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1778,7 +1778,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmsltu.vv v0, v28, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmsltu.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1827,7 +1827,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1845,7 +1845,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1862,7 +1862,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1880,7 +1880,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1897,7 +1897,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1915,7 +1915,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1932,7 +1932,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1950,7 +1950,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1967,7 +1967,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1985,7 +1985,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2002,7 +2002,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2020,7 +2020,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2037,7 +2037,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2055,7 +2055,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2072,7 +2072,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2090,7 +2090,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2107,7 +2107,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2125,7 +2125,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2142,7 +2142,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2160,7 +2160,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2177,7 +2177,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i16_i16(<vscale x 16 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2195,7 +2195,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2212,7 +2212,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2230,7 +2230,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2247,7 +2247,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2265,7 +2265,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2282,7 +2282,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2300,7 +2300,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2317,7 +2317,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2335,7 +2335,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2352,7 +2352,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2370,7 +2370,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2387,7 +2387,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2405,7 +2405,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2422,7 +2422,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2440,7 +2440,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
index fea3c2246bf1..092de13f14c1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i16_i16(<vscale x 16 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1652,7 +1652,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1677,7 +1677,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1699,7 +1699,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1724,7 +1724,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1746,7 +1746,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1771,7 +1771,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1788,7 +1788,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1806,7 +1806,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1823,7 +1823,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1858,7 +1858,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1876,7 +1876,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1893,7 +1893,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1911,7 +1911,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1928,7 +1928,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1963,7 +1963,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1998,7 +1998,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2016,7 +2016,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2033,7 +2033,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2051,7 +2051,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2068,7 +2068,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2086,7 +2086,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2103,7 +2103,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i16_i16(<vscale x 16 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2156,7 +2156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2173,7 +2173,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2191,7 +2191,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2208,7 +2208,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2226,7 +2226,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2243,7 +2243,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2261,7 +2261,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2278,7 +2278,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2331,7 +2331,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2348,7 +2348,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2366,7 +2366,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2383,7 +2383,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2401,7 +2401,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
index 13f93d948995..a4d47ce14e2e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
   <vscale x 1 x i1>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsif_m_nxv1i1(<vscale x 1 x i1> %0, i32 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -53,7 +53,7 @@ define <vscale x 2 x i1> @intrinsic_vmsif_m_nxv2i1(<vscale x 2 x i1> %0, i32 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -75,7 +75,7 @@ define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -95,7 +95,7 @@ define <vscale x 4 x i1> @intrinsic_vmsif_m_nxv4i1(<vscale x 4 x i1> %0, i32 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -117,7 +117,7 @@ define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -137,7 +137,7 @@ define <vscale x 8 x i1> @intrinsic_vmsif_m_nxv8i1(<vscale x 8 x i1> %0, i32 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -159,7 +159,7 @@ define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -179,7 +179,7 @@ define <vscale x 16 x i1> @intrinsic_vmsif_m_nxv16i1(<vscale x 16 x i1> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -201,7 +201,7 @@ define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -221,7 +221,7 @@ define <vscale x 32 x i1> @intrinsic_vmsif_m_nxv32i1(<vscale x 32 x i1> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -243,7 +243,7 @@ define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -263,7 +263,7 @@ define <vscale x 64 x i1> @intrinsic_vmsif_m_nxv64i1(<vscale x 64 x i1> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -285,7 +285,7 @@ define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
     <vscale x 64 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
index 37127542666c..8ca43e36120b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
   <vscale x 1 x i1>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsif_m_nxv1i1(<vscale x 1 x i1> %0, i64 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -53,7 +53,7 @@ define <vscale x 2 x i1> @intrinsic_vmsif_m_nxv2i1(<vscale x 2 x i1> %0, i64 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -75,7 +75,7 @@ define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -95,7 +95,7 @@ define <vscale x 4 x i1> @intrinsic_vmsif_m_nxv4i1(<vscale x 4 x i1> %0, i64 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -117,7 +117,7 @@ define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -137,7 +137,7 @@ define <vscale x 8 x i1> @intrinsic_vmsif_m_nxv8i1(<vscale x 8 x i1> %0, i64 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -159,7 +159,7 @@ define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -179,7 +179,7 @@ define <vscale x 16 x i1> @intrinsic_vmsif_m_nxv16i1(<vscale x 16 x i1> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -201,7 +201,7 @@ define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -221,7 +221,7 @@ define <vscale x 32 x i1> @intrinsic_vmsif_m_nxv32i1(<vscale x 32 x i1> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -243,7 +243,7 @@ define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -263,7 +263,7 @@ define <vscale x 64 x i1> @intrinsic_vmsif_m_nxv64i1(<vscale x 64 x i1> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -285,7 +285,7 @@ define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
     <vscale x 64 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
index 74662ab9f89a..d043dfbf0837 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmsle_vv_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 32 x i1> @intrinsic_vmsle_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@ define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1658,7 +1658,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmsle.vv v0, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1690,7 +1690,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmsle.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1718,7 +1718,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmsle.vv v0, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1750,7 +1750,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmsle.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1778,7 +1778,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmsle.vv v0, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmsle.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1827,7 +1827,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1845,7 +1845,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1862,7 +1862,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1880,7 +1880,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1897,7 +1897,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1915,7 +1915,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1932,7 +1932,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1950,7 +1950,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1967,7 +1967,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1985,7 +1985,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2002,7 +2002,7 @@ define <vscale x 32 x i1> @intrinsic_vmsle_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2020,7 +2020,7 @@ define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2037,7 +2037,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2055,7 +2055,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2072,7 +2072,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2090,7 +2090,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2107,7 +2107,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2125,7 +2125,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2142,7 +2142,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2160,7 +2160,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2177,7 +2177,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2195,7 +2195,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2212,7 +2212,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2230,7 +2230,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2247,7 +2247,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2265,7 +2265,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2282,7 +2282,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2300,7 +2300,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2317,7 +2317,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2335,7 +2335,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2352,7 +2352,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2370,7 +2370,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2387,7 +2387,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2405,7 +2405,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2422,7 +2422,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2440,7 +2440,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
index 62e5b3be5666..85e9485a81b3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmsle_vv_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 32 x i1> @intrinsic_vmsle_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@ define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1652,7 +1652,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1677,7 +1677,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1699,7 +1699,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1724,7 +1724,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1746,7 +1746,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1771,7 +1771,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1788,7 +1788,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1806,7 +1806,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1823,7 +1823,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1858,7 +1858,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1876,7 +1876,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1893,7 +1893,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1911,7 +1911,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1928,7 +1928,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1963,7 +1963,7 @@ define <vscale x 32 x i1> @intrinsic_vmsle_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1998,7 +1998,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2016,7 +2016,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2033,7 +2033,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2051,7 +2051,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2068,7 +2068,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2086,7 +2086,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2103,7 +2103,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2156,7 +2156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2173,7 +2173,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2191,7 +2191,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2208,7 +2208,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2226,7 +2226,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2243,7 +2243,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2261,7 +2261,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2278,7 +2278,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2331,7 +2331,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2348,7 +2348,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2366,7 +2366,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2383,7 +2383,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2401,7 +2401,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
index 5bf609ef07f1..ff3bc12d02d9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i16_i16(<vscale x 16 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1658,7 +1658,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1690,7 +1690,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmsleu.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1718,7 +1718,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1750,7 +1750,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmsleu.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1778,7 +1778,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmsleu.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1827,7 +1827,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1845,7 +1845,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1862,7 +1862,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1880,7 +1880,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1897,7 +1897,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1915,7 +1915,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1932,7 +1932,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1950,7 +1950,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1967,7 +1967,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1985,7 +1985,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2002,7 +2002,7 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2020,7 +2020,7 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2037,7 +2037,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2055,7 +2055,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2072,7 +2072,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2090,7 +2090,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2107,7 +2107,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2125,7 +2125,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2142,7 +2142,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2160,7 +2160,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2177,7 +2177,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i16_i16(<vscale x 16 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2195,7 +2195,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2212,7 +2212,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2230,7 +2230,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2247,7 +2247,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2265,7 +2265,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2282,7 +2282,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2300,7 +2300,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2317,7 +2317,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2335,7 +2335,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2352,7 +2352,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2370,7 +2370,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2387,7 +2387,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2405,7 +2405,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2422,7 +2422,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2440,7 +2440,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
index 7ac748dcff7c..2f6fddfec1b3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i16_i16(<vscale x 16 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1652,7 +1652,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1677,7 +1677,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1699,7 +1699,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1724,7 +1724,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1746,7 +1746,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1771,7 +1771,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1788,7 +1788,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1806,7 +1806,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1823,7 +1823,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1858,7 +1858,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1876,7 +1876,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1893,7 +1893,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1911,7 +1911,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1928,7 +1928,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1963,7 +1963,7 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1998,7 +1998,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2016,7 +2016,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2033,7 +2033,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2051,7 +2051,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2068,7 +2068,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2086,7 +2086,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2103,7 +2103,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i16_i16(<vscale x 16 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2156,7 +2156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2173,7 +2173,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2191,7 +2191,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2208,7 +2208,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2226,7 +2226,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2243,7 +2243,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2261,7 +2261,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2278,7 +2278,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2331,7 +2331,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2348,7 +2348,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2366,7 +2366,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2383,7 +2383,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2401,7 +2401,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
index 8ca44d66ebeb..cb488de76533 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmslt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 32 x i1> @intrinsic_vmslt_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@ define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1658,7 +1658,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmslt.vv v0, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1690,7 +1690,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmslt.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1718,7 +1718,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmslt.vv v0, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1750,7 +1750,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmslt.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1778,7 +1778,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmslt.vv v0, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmslt.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1827,7 +1827,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1845,7 +1845,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1862,7 +1862,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1880,7 +1880,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1897,7 +1897,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1915,7 +1915,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1932,7 +1932,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1950,7 +1950,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1967,7 +1967,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1985,7 +1985,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2002,7 +2002,7 @@ define <vscale x 32 x i1> @intrinsic_vmslt_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -6
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2020,7 +2020,7 @@ define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2037,7 +2037,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -4
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2055,7 +2055,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2072,7 +2072,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -2
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2090,7 +2090,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -1, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2107,7 +2107,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2125,7 +2125,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2142,7 +2142,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2160,7 +2160,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2177,7 +2177,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 3
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2195,7 +2195,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2212,7 +2212,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 5
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2230,7 +2230,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2247,7 +2247,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 7
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2265,7 +2265,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2282,7 +2282,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2300,7 +2300,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2317,7 +2317,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2335,7 +2335,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2352,7 +2352,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2370,7 +2370,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2387,7 +2387,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2405,7 +2405,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2422,7 +2422,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2440,7 +2440,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

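As context for the assembly changes above: "ret" is the standard RISC-V
pseudo-instruction for "jalr zero, 0(ra)" (an indirect jump to the return
address held in ra, writing no link register), so enabling alias printing only
changes the spelling in the CHECK lines, not the generated code. A minimal
illustration, not part of this patch, assuming an llvm-mc binary built with the
RISC-V target; both inputs assemble to the same 32-bit word 0x00008067, and
-riscv-no-aliases merely selects the raw spelling when printing:

  $ echo "ret" | llvm-mc -triple=riscv64 -show-encoding
  $ echo "jalr zero, 0(ra)" | llvm-mc -triple=riscv64 -show-encoding -riscv-no-aliases
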
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
index 6baa2630d31b..c105bf087093 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmslt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 32 x i1> @intrinsic_vmslt_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@ define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1652,7 +1652,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1677,7 +1677,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1699,7 +1699,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1724,7 +1724,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1746,7 +1746,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1771,7 +1771,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1788,7 +1788,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1806,7 +1806,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1823,7 +1823,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1858,7 +1858,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1876,7 +1876,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1893,7 +1893,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1911,7 +1911,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1928,7 +1928,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1963,7 +1963,7 @@ define <vscale x 32 x i1> @intrinsic_vmslt_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -6
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1998,7 +1998,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -4
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2016,7 +2016,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2033,7 +2033,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -2
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2051,7 +2051,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -1, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2068,7 +2068,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2086,7 +2086,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2103,7 +2103,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 3
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2156,7 +2156,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2173,7 +2173,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 5
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2191,7 +2191,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2208,7 +2208,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 7
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2226,7 +2226,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2243,7 +2243,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2261,7 +2261,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2278,7 +2278,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 13
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2331,7 +2331,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2348,7 +2348,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2366,7 +2366,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2383,7 +2383,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2401,7 +2401,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
index 3ccd474a441b..c3bdc22fd9fc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i16_i16(<vscale x 16 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1658,7 +1658,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1690,7 +1690,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmsltu.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1718,7 +1718,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1750,7 +1750,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmsltu.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1778,7 +1778,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmsltu.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1827,7 +1827,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1845,7 +1845,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1862,7 +1862,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1880,7 +1880,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1897,7 +1897,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1915,7 +1915,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1932,7 +1932,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1950,7 +1950,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1967,7 +1967,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1985,7 +1985,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2002,7 +2002,7 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -6
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2020,7 +2020,7 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2037,7 +2037,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -4
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2055,7 +2055,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2072,7 +2072,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -2
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2090,7 +2090,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vv v25, v8, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2107,7 +2107,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2125,7 +2125,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2142,7 +2142,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2160,7 +2160,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2177,7 +2177,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i16_i16(<vscale x 16 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 3
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2195,7 +2195,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2212,7 +2212,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 5
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2230,7 +2230,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2247,7 +2247,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 7
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2265,7 +2265,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2282,7 +2282,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2300,7 +2300,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2317,7 +2317,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2335,7 +2335,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2352,7 +2352,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 13
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2370,7 +2370,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2387,7 +2387,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2405,7 +2405,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2422,7 +2422,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2440,7 +2440,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
index 85898e47b2f9..1f4cd7388e91 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i16_i16(<vscale x 16 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1652,7 +1652,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1677,7 +1677,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1699,7 +1699,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1724,7 +1724,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1746,7 +1746,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1771,7 +1771,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1788,7 +1788,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1806,7 +1806,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1823,7 +1823,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1858,7 +1858,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1876,7 +1876,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1893,7 +1893,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1911,7 +1911,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1928,7 +1928,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1963,7 +1963,7 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -6
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1998,7 +1998,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -4
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2016,7 +2016,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2033,7 +2033,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -2
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2051,7 +2051,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vv v25, v8, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2068,7 +2068,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2086,7 +2086,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2103,7 +2103,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i16_i16(<vscale x 16 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 3
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2156,7 +2156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2173,7 +2173,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 5
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2191,7 +2191,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2208,7 +2208,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 7
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2226,7 +2226,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2243,7 +2243,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2261,7 +2261,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2278,7 +2278,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 13
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2331,7 +2331,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2348,7 +2348,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2366,7 +2366,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2383,7 +2383,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2401,7 +2401,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
index b8feac3c8ce6..f1d4ba51947c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmsne_vv_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 32 x i1> @intrinsic_vmsne_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@ define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1658,7 +1658,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmsne.vv v0, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1690,7 +1690,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmsne.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1718,7 +1718,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmsne.vv v0, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1750,7 +1750,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmsne.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1778,7 +1778,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmsne.vv v0, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmsne.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1827,7 +1827,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1845,7 +1845,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1862,7 +1862,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1880,7 +1880,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1897,7 +1897,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1915,7 +1915,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1932,7 +1932,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1950,7 +1950,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1967,7 +1967,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1985,7 +1985,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2002,7 +2002,7 @@ define <vscale x 32 x i1> @intrinsic_vmsne_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2020,7 +2020,7 @@ define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2037,7 +2037,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2055,7 +2055,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2072,7 +2072,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2090,7 +2090,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2107,7 +2107,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2125,7 +2125,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2142,7 +2142,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2160,7 +2160,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2177,7 +2177,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2195,7 +2195,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2212,7 +2212,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2230,7 +2230,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2247,7 +2247,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2265,7 +2265,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2282,7 +2282,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2300,7 +2300,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2317,7 +2317,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2335,7 +2335,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2352,7 +2352,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2370,7 +2370,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2387,7 +2387,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2405,7 +2405,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2422,7 +2422,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2440,7 +2440,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
index 349daf953df5..8d8ef761b45e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@ define <vscale x 32 x i1> @intrinsic_vmsne_vv_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 32 x i1> @intrinsic_vmsne_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@ define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1652,7 +1652,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1677,7 +1677,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1699,7 +1699,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1724,7 +1724,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1746,7 +1746,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1771,7 +1771,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1788,7 +1788,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1806,7 +1806,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1823,7 +1823,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1858,7 +1858,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1876,7 +1876,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1893,7 +1893,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1911,7 +1911,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1928,7 +1928,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i8_i8(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1963,7 +1963,7 @@ define <vscale x 32 x i1> @intrinsic_vmsne_vi_nxv32i8_i8(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1998,7 +1998,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i16_i16(<vscale x 1 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2016,7 +2016,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2033,7 +2033,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i16_i16(<vscale x 2 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2051,7 +2051,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2068,7 +2068,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i16_i16(<vscale x 4 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2086,7 +2086,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2103,7 +2103,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i16_i16(<vscale x 8 x i16> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i16_i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2156,7 +2156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2173,7 +2173,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i32_i32(<vscale x 1 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2191,7 +2191,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2208,7 +2208,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i32_i32(<vscale x 2 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2226,7 +2226,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2243,7 +2243,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i32_i32(<vscale x 4 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2261,7 +2261,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2278,7 +2278,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i32_i32(<vscale x 8 x i32> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i64_i64(<vscale x 1 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2331,7 +2331,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2348,7 +2348,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i64_i64(<vscale x 2 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2366,7 +2366,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2383,7 +2383,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i64_i64(<vscale x 4 x i64> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2401,7 +2401,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
index e0ee3a5797e8..9a3a64d599b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
   <vscale x 1 x i1>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsof_m_nxv1i1(<vscale x 1 x i1> %0, i32 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -53,7 +53,7 @@ define <vscale x 2 x i1> @intrinsic_vmsof_m_nxv2i1(<vscale x 2 x i1> %0, i32 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -75,7 +75,7 @@ define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -95,7 +95,7 @@ define <vscale x 4 x i1> @intrinsic_vmsof_m_nxv4i1(<vscale x 4 x i1> %0, i32 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -117,7 +117,7 @@ define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -137,7 +137,7 @@ define <vscale x 8 x i1> @intrinsic_vmsof_m_nxv8i1(<vscale x 8 x i1> %0, i32 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -159,7 +159,7 @@ define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -179,7 +179,7 @@ define <vscale x 16 x i1> @intrinsic_vmsof_m_nxv16i1(<vscale x 16 x i1> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -201,7 +201,7 @@ define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -221,7 +221,7 @@ define <vscale x 32 x i1> @intrinsic_vmsof_m_nxv32i1(<vscale x 32 x i1> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -243,7 +243,7 @@ define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -263,7 +263,7 @@ define <vscale x 64 x i1> @intrinsic_vmsof_m_nxv64i1(<vscale x 64 x i1> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -285,7 +285,7 @@ define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
index bd0780129a3c..07e1b3c81d58 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
   <vscale x 1 x i1>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmsof_m_nxv1i1(<vscale x 1 x i1> %0, i64 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -53,7 +53,7 @@ define <vscale x 2 x i1> @intrinsic_vmsof_m_nxv2i1(<vscale x 2 x i1> %0, i64 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -75,7 +75,7 @@ define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -95,7 +95,7 @@ define <vscale x 4 x i1> @intrinsic_vmsof_m_nxv4i1(<vscale x 4 x i1> %0, i64 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -117,7 +117,7 @@ define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -137,7 +137,7 @@ define <vscale x 8 x i1> @intrinsic_vmsof_m_nxv8i1(<vscale x 8 x i1> %0, i64 %1)
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -159,7 +159,7 @@ define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -179,7 +179,7 @@ define <vscale x 16 x i1> @intrinsic_vmsof_m_nxv16i1(<vscale x 16 x i1> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -201,7 +201,7 @@ define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -221,7 +221,7 @@ define <vscale x 32 x i1> @intrinsic_vmsof_m_nxv32i1(<vscale x 32 x i1> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -243,7 +243,7 @@ define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -263,7 +263,7 @@ define <vscale x 64 x i1> @intrinsic_vmsof_m_nxv64i1(<vscale x 64 x i1> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -285,7 +285,7 @@ define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
index 512d8d4281fa..155aba574f0e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmul.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmul.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmul.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
index 78f2e63357d1..74a9670660de 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
index 98e43495a664..c18319254454 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmulh.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmulh.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmulh.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
index 84ff58458dbd..3665d9ae2832 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
index 6cbd01d80251..094866db483e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vs
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8(<vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64(<vscale x 1
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64(<vscale x 2
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64(<vscale x 4
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64(<vscale x 8
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
index ef8fc76ebd06..1f999651774a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vs
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8(<vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
index e53fa4ca6b04..1c445f8a9b81 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32(
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsc
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhu_vx_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhu_vx_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhu_vx_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
index 9bfe1fcae7aa..4d2eeb5ded8e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32(
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsc
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhu_vx_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhu_vx_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhu_vx_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll
index ab714c9225af..cef71e3df544 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
   <vscale x 1 x i8>,
   i32);
@@ -10,7 +10,7 @@ define <vscale x 1 x i8> @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -28,7 +28,7 @@ define <vscale x 2 x i8> @intrinsic_vmv.v.v_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -46,7 +46,7 @@ define <vscale x 4 x i8> @intrinsic_vmv.v.v_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -64,7 +64,7 @@ define <vscale x 8 x i8> @intrinsic_vmv.v.v_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -82,7 +82,7 @@ define <vscale x 16 x i8> @intrinsic_vmv.v.v_v_nxv16i8_nxv16i8(<vscale x 16 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -100,7 +100,7 @@ define <vscale x 32 x i8> @intrinsic_vmv.v.v_v_nxv32i8_nxv32i8(<vscale x 32 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -118,7 +118,7 @@ define <vscale x 64 x i8> @intrinsic_vmv.v.v_v_nxv64i8_nxv64i8(<vscale x 64 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -136,7 +136,7 @@ define <vscale x 1 x i16> @intrinsic_vmv.v.v_v_nxv1i16_nxv1i16(<vscale x 1 x i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -154,7 +154,7 @@ define <vscale x 2 x i16> @intrinsic_vmv.v.v_v_nxv2i16_nxv2i16(<vscale x 2 x i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -172,7 +172,7 @@ define <vscale x 4 x i16> @intrinsic_vmv.v.v_v_nxv4i16_nxv4i16(<vscale x 4 x i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -190,7 +190,7 @@ define <vscale x 8 x i16> @intrinsic_vmv.v.v_v_nxv8i16_nxv8i16(<vscale x 8 x i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -208,7 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vmv.v.v_v_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -226,7 +226,7 @@ define <vscale x 32 x i16> @intrinsic_vmv.v.v_v_nxv32i16_nxv32i16(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -244,7 +244,7 @@ define <vscale x 1 x i32> @intrinsic_vmv.v.v_v_nxv1i32_nxv1i32(<vscale x 1 x i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -262,7 +262,7 @@ define <vscale x 2 x i32> @intrinsic_vmv.v.v_v_nxv2i32_nxv2i32(<vscale x 2 x i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -280,7 +280,7 @@ define <vscale x 4 x i32> @intrinsic_vmv.v.v_v_nxv4i32_nxv4i32(<vscale x 4 x i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -298,7 +298,7 @@ define <vscale x 8 x i32> @intrinsic_vmv.v.v_v_nxv8i32_nxv8i32(<vscale x 8 x i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -316,7 +316,7 @@ define <vscale x 16 x i32> @intrinsic_vmv.v.v_v_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -334,7 +334,7 @@ define <vscale x 1 x i64> @intrinsic_vmv.v.v_v_nxv1i64_nxv1i64(<vscale x 1 x i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -352,7 +352,7 @@ define <vscale x 2 x i64> @intrinsic_vmv.v.v_v_nxv2i64_nxv2i64(<vscale x 2 x i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -370,7 +370,7 @@ define <vscale x 4 x i64> @intrinsic_vmv.v.v_v_nxv4i64_nxv4i64(<vscale x 4 x i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -388,7 +388,7 @@ define <vscale x 8 x i64> @intrinsic_vmv.v.v_v_nxv8i64_nxv8i64(<vscale x 8 x i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -406,7 +406,7 @@ define <vscale x 1 x half> @intrinsic_vmv.v.v_v_nxv1f16_nxv1f16(<vscale x 1 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vmv.v.v.nxv1f16(
     <vscale x 1 x half> %0,
@@ -424,7 +424,7 @@ define <vscale x 2 x half> @intrinsic_vmv.v.v_v_nxv2f16_nxv2f16(<vscale x 2 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vmv.v.v.nxv2f16(
     <vscale x 2 x half> %0,
@@ -442,7 +442,7 @@ define <vscale x 4 x half> @intrinsic_vmv.v.v_v_nxv4f16_nxv4f16(<vscale x 4 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vmv.v.v.nxv4f16(
     <vscale x 4 x half> %0,
@@ -460,7 +460,7 @@ define <vscale x 8 x half> @intrinsic_vmv.v.v_v_nxv8f16_nxv8f16(<vscale x 8 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vmv.v.v.nxv8f16(
     <vscale x 8 x half> %0,
@@ -478,7 +478,7 @@ define <vscale x 16 x half> @intrinsic_vmv.v.v_v_nxv16f16_nxv16f16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vmv.v.v.nxv16f16(
     <vscale x 16 x half> %0,
@@ -496,7 +496,7 @@ define <vscale x 32 x half> @intrinsic_vmv.v.v_v_nxv32f16_nxv32f16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vmv.v.v.nxv32f16(
     <vscale x 32 x half> %0,
@@ -514,7 +514,7 @@ define <vscale x 1 x float> @intrinsic_vmv.v.v_v_nxv1f32_nxv1f32(<vscale x 1 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32(
     <vscale x 1 x float> %0,
@@ -532,7 +532,7 @@ define <vscale x 2 x float> @intrinsic_vmv.v.v_v_nxv2f32_nxv2f32(<vscale x 2 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32(
     <vscale x 2 x float> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x float> @intrinsic_vmv.v.v_v_nxv4f32_nxv4f32(<vscale x 4 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32(
     <vscale x 4 x float> %0,
@@ -568,7 +568,7 @@ define <vscale x 8 x float> @intrinsic_vmv.v.v_v_nxv8f32_nxv8f32(<vscale x 8 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32(
     <vscale x 8 x float> %0,
@@ -586,7 +586,7 @@ define <vscale x 16 x float> @intrinsic_vmv.v.v_v_nxv16f32_nxv16f32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32(
     <vscale x 16 x float> %0,
@@ -604,7 +604,7 @@ define <vscale x 1 x double> @intrinsic_vmv.v.v_v_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64(
     <vscale x 1 x double> %0,
@@ -622,7 +622,7 @@ define <vscale x 2 x double> @intrinsic_vmv.v.v_v_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64(
     <vscale x 2 x double> %0,
@@ -640,7 +640,7 @@ define <vscale x 4 x double> @intrinsic_vmv.v.v_v_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64(
     <vscale x 4 x double> %0,
@@ -658,7 +658,7 @@ define <vscale x 8 x double> @intrinsic_vmv.v.v_v_nxv8f64_nxv8f64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll
index e0ab49fdb4ed..8573f050010b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
   <vscale x 1 x i8>,
   i64);
@@ -10,7 +10,7 @@ define <vscale x 1 x i8> @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -28,7 +28,7 @@ define <vscale x 2 x i8> @intrinsic_vmv.v.v_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -46,7 +46,7 @@ define <vscale x 4 x i8> @intrinsic_vmv.v.v_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -64,7 +64,7 @@ define <vscale x 8 x i8> @intrinsic_vmv.v.v_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -82,7 +82,7 @@ define <vscale x 16 x i8> @intrinsic_vmv.v.v_v_nxv16i8_nxv16i8(<vscale x 16 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -100,7 +100,7 @@ define <vscale x 32 x i8> @intrinsic_vmv.v.v_v_nxv32i8_nxv32i8(<vscale x 32 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -118,7 +118,7 @@ define <vscale x 64 x i8> @intrinsic_vmv.v.v_v_nxv64i8_nxv64i8(<vscale x 64 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -136,7 +136,7 @@ define <vscale x 1 x i16> @intrinsic_vmv.v.v_v_nxv1i16_nxv1i16(<vscale x 1 x i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -154,7 +154,7 @@ define <vscale x 2 x i16> @intrinsic_vmv.v.v_v_nxv2i16_nxv2i16(<vscale x 2 x i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -172,7 +172,7 @@ define <vscale x 4 x i16> @intrinsic_vmv.v.v_v_nxv4i16_nxv4i16(<vscale x 4 x i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -190,7 +190,7 @@ define <vscale x 8 x i16> @intrinsic_vmv.v.v_v_nxv8i16_nxv8i16(<vscale x 8 x i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -208,7 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vmv.v.v_v_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -226,7 +226,7 @@ define <vscale x 32 x i16> @intrinsic_vmv.v.v_v_nxv32i16_nxv32i16(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -244,7 +244,7 @@ define <vscale x 1 x i32> @intrinsic_vmv.v.v_v_nxv1i32_nxv1i32(<vscale x 1 x i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -262,7 +262,7 @@ define <vscale x 2 x i32> @intrinsic_vmv.v.v_v_nxv2i32_nxv2i32(<vscale x 2 x i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -280,7 +280,7 @@ define <vscale x 4 x i32> @intrinsic_vmv.v.v_v_nxv4i32_nxv4i32(<vscale x 4 x i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -298,7 +298,7 @@ define <vscale x 8 x i32> @intrinsic_vmv.v.v_v_nxv8i32_nxv8i32(<vscale x 8 x i32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -316,7 +316,7 @@ define <vscale x 16 x i32> @intrinsic_vmv.v.v_v_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -334,7 +334,7 @@ define <vscale x 1 x i64> @intrinsic_vmv.v.v_v_nxv1i64_nxv1i64(<vscale x 1 x i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -352,7 +352,7 @@ define <vscale x 2 x i64> @intrinsic_vmv.v.v_v_nxv2i64_nxv2i64(<vscale x 2 x i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -370,7 +370,7 @@ define <vscale x 4 x i64> @intrinsic_vmv.v.v_v_nxv4i64_nxv4i64(<vscale x 4 x i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -388,7 +388,7 @@ define <vscale x 8 x i64> @intrinsic_vmv.v.v_v_nxv8i64_nxv8i64(<vscale x 8 x i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -406,7 +406,7 @@ define <vscale x 1 x half> @intrinsic_vmv.v.v_v_nxv1f16_nxv1f16(<vscale x 1 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vmv.v.v.nxv1f16(
     <vscale x 1 x half> %0,
@@ -424,7 +424,7 @@ define <vscale x 2 x half> @intrinsic_vmv.v.v_v_nxv2f16_nxv2f16(<vscale x 2 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vmv.v.v.nxv2f16(
     <vscale x 2 x half> %0,
@@ -442,7 +442,7 @@ define <vscale x 4 x half> @intrinsic_vmv.v.v_v_nxv4f16_nxv4f16(<vscale x 4 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vmv.v.v.nxv4f16(
     <vscale x 4 x half> %0,
@@ -460,7 +460,7 @@ define <vscale x 8 x half> @intrinsic_vmv.v.v_v_nxv8f16_nxv8f16(<vscale x 8 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vmv.v.v.nxv8f16(
     <vscale x 8 x half> %0,
@@ -478,7 +478,7 @@ define <vscale x 16 x half> @intrinsic_vmv.v.v_v_nxv16f16_nxv16f16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vmv.v.v.nxv16f16(
     <vscale x 16 x half> %0,
@@ -496,7 +496,7 @@ define <vscale x 32 x half> @intrinsic_vmv.v.v_v_nxv32f16_nxv32f16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vmv.v.v.nxv32f16(
     <vscale x 32 x half> %0,
@@ -514,7 +514,7 @@ define <vscale x 1 x float> @intrinsic_vmv.v.v_v_nxv1f32_nxv1f32(<vscale x 1 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32(
     <vscale x 1 x float> %0,
@@ -532,7 +532,7 @@ define <vscale x 2 x float> @intrinsic_vmv.v.v_v_nxv2f32_nxv2f32(<vscale x 2 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32(
     <vscale x 2 x float> %0,
@@ -550,7 +550,7 @@ define <vscale x 4 x float> @intrinsic_vmv.v.v_v_nxv4f32_nxv4f32(<vscale x 4 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32(
     <vscale x 4 x float> %0,
@@ -568,7 +568,7 @@ define <vscale x 8 x float> @intrinsic_vmv.v.v_v_nxv8f32_nxv8f32(<vscale x 8 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32(
     <vscale x 8 x float> %0,
@@ -586,7 +586,7 @@ define <vscale x 16 x float> @intrinsic_vmv.v.v_v_nxv16f32_nxv16f32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32(
     <vscale x 16 x float> %0,
@@ -604,7 +604,7 @@ define <vscale x 1 x double> @intrinsic_vmv.v.v_v_nxv1f64_nxv1f64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64(
     <vscale x 1 x double> %0,
@@ -622,7 +622,7 @@ define <vscale x 2 x double> @intrinsic_vmv.v.v_v_nxv2f64_nxv2f64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64(
     <vscale x 2 x double> %0,
@@ -640,7 +640,7 @@ define <vscale x 4 x double> @intrinsic_vmv.v.v_v_nxv4f64_nxv4f64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64(
     <vscale x 4 x double> %0,
@@ -658,7 +658,7 @@ define <vscale x 8 x double> @intrinsic_vmv.v.v_v_nxv8f64_nxv8f64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
index a0287dc6ae7e..94d6c2891974 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
   i8,
   i32);
@@ -10,7 +10,7 @@ define <vscale x 1 x i8> @intrinsic_vmv.v.x_x_nxv1i8(i8 %0, i32 %1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
     i8 %0,
@@ -28,7 +28,7 @@ define <vscale x 2 x i8> @intrinsic_vmv.v.x_x_nxv2i8(i8 %0, i32 %1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
     i8 %0,
@@ -46,7 +46,7 @@ define <vscale x 4 x i8> @intrinsic_vmv.v.x_x_nxv4i8(i8 %0, i32 %1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
     i8 %0,
@@ -64,7 +64,7 @@ define <vscale x 8 x i8> @intrinsic_vmv.v.x_x_nxv8i8(i8 %0, i32 %1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
     i8 %0,
@@ -82,7 +82,7 @@ define <vscale x 16 x i8> @intrinsic_vmv.v.x_x_nxv16i8(i8 %0, i32 %1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
     i8 %0,
@@ -100,7 +100,7 @@ define <vscale x 32 x i8> @intrinsic_vmv.v.x_x_nxv32i8(i8 %0, i32 %1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
     i8 %0,
@@ -118,7 +118,7 @@ define <vscale x 64 x i8> @intrinsic_vmv.v.x_x_nxv64i8(i8 %0, i32 %1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
     i8 %0,
@@ -136,7 +136,7 @@ define <vscale x 1 x i16> @intrinsic_vmv.v.x_x_nxv1i16(i16 %0, i32 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
     i16 %0,
@@ -154,7 +154,7 @@ define <vscale x 2 x i16> @intrinsic_vmv.v.x_x_nxv2i16(i16 %0, i32 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
     i16 %0,
@@ -172,7 +172,7 @@ define <vscale x 4 x i16> @intrinsic_vmv.v.x_x_nxv4i16(i16 %0, i32 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
     i16 %0,
@@ -190,7 +190,7 @@ define <vscale x 8 x i16> @intrinsic_vmv.v.x_x_nxv8i16(i16 %0, i32 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
     i16 %0,
@@ -208,7 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vmv.v.x_x_nxv16i16(i16 %0, i32 %1) nounwin
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
     i16 %0,
@@ -226,7 +226,7 @@ define <vscale x 32 x i16> @intrinsic_vmv.v.x_x_nxv32i16(i16 %0, i32 %1) nounwin
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
     i16 %0,
@@ -244,7 +244,7 @@ define <vscale x 1 x i32> @intrinsic_vmv.v.x_x_nxv1i32(i32 %0, i32 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
     i32 %0,
@@ -262,7 +262,7 @@ define <vscale x 2 x i32> @intrinsic_vmv.v.x_x_nxv2i32(i32 %0, i32 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
     i32 %0,
@@ -280,7 +280,7 @@ define <vscale x 4 x i32> @intrinsic_vmv.v.x_x_nxv4i32(i32 %0, i32 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
     i32 %0,
@@ -298,7 +298,7 @@ define <vscale x 8 x i32> @intrinsic_vmv.v.x_x_nxv8i32(i32 %0, i32 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
     i32 %0,
@@ -316,7 +316,7 @@ define <vscale x 16 x i32> @intrinsic_vmv.v.x_x_nxv16i32(i32 %0, i32 %1) nounwin
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
     i32 %0,
@@ -339,7 +339,7 @@ define <vscale x 1 x i64> @intrinsic_vmv.v.x_x_nxv1i64(i64 %0, i32 %1) nounwind
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v8, (a0), zero
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
     i64 %0,
@@ -362,7 +362,7 @@ define <vscale x 2 x i64> @intrinsic_vmv.v.x_x_nxv2i64(i64 %0, i32 %1) nounwind
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v8, (a0), zero
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
     i64 %0,
@@ -385,7 +385,7 @@ define <vscale x 4 x i64> @intrinsic_vmv.v.x_x_nxv4i64(i64 %0, i32 %1) nounwind
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v8, (a0), zero
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
     i64 %0,
@@ -408,7 +408,7 @@ define <vscale x 8 x i64> @intrinsic_vmv.v.x_x_nxv8i64(i64 %0, i32 %1) nounwind
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v8, (a0), zero
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
     i64 %0,
@@ -422,7 +422,7 @@ define <vscale x 1 x i8> @intrinsic_vmv.v.x_i_nxv1i8(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
     i8 9,
@@ -436,7 +436,7 @@ define <vscale x 2 x i8> @intrinsic_vmv.v.x_i_nxv2i8(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
     i8 9,
@@ -450,7 +450,7 @@ define <vscale x 4 x i8> @intrinsic_vmv.v.x_i_nxv4i8(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
     i8 9,
@@ -464,7 +464,7 @@ define <vscale x 8 x i8> @intrinsic_vmv.v.x_i_nxv8i8(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
     i8 9,
@@ -478,7 +478,7 @@ define <vscale x 16 x i8> @intrinsic_vmv.v.x_i_nxv16i8(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
     i8 9,
@@ -492,7 +492,7 @@ define <vscale x 32 x i8> @intrinsic_vmv.v.x_i_nxv32i8(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
     i8 9,
@@ -506,7 +506,7 @@ define <vscale x 64 x i8> @intrinsic_vmv.v.x_i_nxv64i8(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
     i8 9,
@@ -520,7 +520,7 @@ define <vscale x 1 x i16> @intrinsic_vmv.v.x_i_nxv1i16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
     i16 9,
@@ -534,7 +534,7 @@ define <vscale x 2 x i16> @intrinsic_vmv.v.x_i_nxv2i16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
     i16 9,
@@ -548,7 +548,7 @@ define <vscale x 4 x i16> @intrinsic_vmv.v.x_i_nxv4i16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
     i16 9,
@@ -562,7 +562,7 @@ define <vscale x 8 x i16> @intrinsic_vmv.v.x_i_nxv8i16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
     i16 9,
@@ -576,7 +576,7 @@ define <vscale x 16 x i16> @intrinsic_vmv.v.x_i_nxv16i16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
     i16 9,
@@ -590,7 +590,7 @@ define <vscale x 32 x i16> @intrinsic_vmv.v.x_i_nxv32i16(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
     i16 9,
@@ -604,7 +604,7 @@ define <vscale x 1 x i32> @intrinsic_vmv.v.x_i_nxv1i32(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
     i32 9,
@@ -618,7 +618,7 @@ define <vscale x 2 x i32> @intrinsic_vmv.v.x_i_nxv2i32(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
     i32 9,
@@ -632,7 +632,7 @@ define <vscale x 4 x i32> @intrinsic_vmv.v.x_i_nxv4i32(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
     i32 9,
@@ -646,7 +646,7 @@ define <vscale x 8 x i32> @intrinsic_vmv.v.x_i_nxv8i32(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
     i32 9,
@@ -660,7 +660,7 @@ define <vscale x 16 x i32> @intrinsic_vmv.v.x_i_nxv16i32(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
     i32 9,
@@ -674,7 +674,7 @@ define <vscale x 1 x i64> @intrinsic_vmv.v.x_i_nxv1i64(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
     i64 9,
@@ -688,7 +688,7 @@ define <vscale x 2 x i64> @intrinsic_vmv.v.x_i_nxv2i64(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
     i64 9,
@@ -702,7 +702,7 @@ define <vscale x 4 x i64> @intrinsic_vmv.v.x_i_nxv4i64(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
     i64 9,
@@ -716,7 +716,7 @@ define <vscale x 8 x i64> @intrinsic_vmv.v.x_i_nxv8i64(i32 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
     i64 9,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll
index 52acf994689d..c3a757b8f5ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
   i8,
   i64);
@@ -10,7 +10,7 @@ define <vscale x 1 x i8> @intrinsic_vmv.v.x_x_nxv1i8(i8 %0, i64 %1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
     i8 %0,
@@ -28,7 +28,7 @@ define <vscale x 2 x i8> @intrinsic_vmv.v.x_x_nxv2i8(i8 %0, i64 %1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
     i8 %0,
@@ -46,7 +46,7 @@ define <vscale x 4 x i8> @intrinsic_vmv.v.x_x_nxv4i8(i8 %0, i64 %1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
     i8 %0,
@@ -64,7 +64,7 @@ define <vscale x 8 x i8> @intrinsic_vmv.v.x_x_nxv8i8(i8 %0, i64 %1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
     i8 %0,
@@ -82,7 +82,7 @@ define <vscale x 16 x i8> @intrinsic_vmv.v.x_x_nxv16i8(i8 %0, i64 %1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
     i8 %0,
@@ -100,7 +100,7 @@ define <vscale x 32 x i8> @intrinsic_vmv.v.x_x_nxv32i8(i8 %0, i64 %1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
     i8 %0,
@@ -118,7 +118,7 @@ define <vscale x 64 x i8> @intrinsic_vmv.v.x_x_nxv64i8(i8 %0, i64 %1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
     i8 %0,
@@ -136,7 +136,7 @@ define <vscale x 1 x i16> @intrinsic_vmv.v.x_x_nxv1i16(i16 %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
     i16 %0,
@@ -154,7 +154,7 @@ define <vscale x 2 x i16> @intrinsic_vmv.v.x_x_nxv2i16(i16 %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
     i16 %0,
@@ -172,7 +172,7 @@ define <vscale x 4 x i16> @intrinsic_vmv.v.x_x_nxv4i16(i16 %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
     i16 %0,
@@ -190,7 +190,7 @@ define <vscale x 8 x i16> @intrinsic_vmv.v.x_x_nxv8i16(i16 %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
     i16 %0,
@@ -208,7 +208,7 @@ define <vscale x 16 x i16> @intrinsic_vmv.v.x_x_nxv16i16(i16 %0, i64 %1) nounwin
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
     i16 %0,
@@ -226,7 +226,7 @@ define <vscale x 32 x i16> @intrinsic_vmv.v.x_x_nxv32i16(i16 %0, i64 %1) nounwin
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
     i16 %0,
@@ -244,7 +244,7 @@ define <vscale x 1 x i32> @intrinsic_vmv.v.x_x_nxv1i32(i32 %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
     i32 %0,
@@ -262,7 +262,7 @@ define <vscale x 2 x i32> @intrinsic_vmv.v.x_x_nxv2i32(i32 %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
     i32 %0,
@@ -280,7 +280,7 @@ define <vscale x 4 x i32> @intrinsic_vmv.v.x_x_nxv4i32(i32 %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
     i32 %0,
@@ -298,7 +298,7 @@ define <vscale x 8 x i32> @intrinsic_vmv.v.x_x_nxv8i32(i32 %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
     i32 %0,
@@ -316,7 +316,7 @@ define <vscale x 16 x i32> @intrinsic_vmv.v.x_x_nxv16i32(i32 %0, i64 %1) nounwin
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
     i32 %0,
@@ -334,7 +334,7 @@ define <vscale x 1 x i64> @intrinsic_vmv.v.x_x_nxv1i64(i64 %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
     i64 %0,
@@ -352,7 +352,7 @@ define <vscale x 2 x i64> @intrinsic_vmv.v.x_x_nxv2i64(i64 %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
     i64 %0,
@@ -370,7 +370,7 @@ define <vscale x 4 x i64> @intrinsic_vmv.v.x_x_nxv4i64(i64 %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
     i64 %0,
@@ -388,7 +388,7 @@ define <vscale x 8 x i64> @intrinsic_vmv.v.x_x_nxv8i64(i64 %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
     i64 %0,
@@ -402,7 +402,7 @@ define <vscale x 1 x i8> @intrinsic_vmv.v.x_i_nxv1i8(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
     i8 9,
@@ -416,7 +416,7 @@ define <vscale x 2 x i8> @intrinsic_vmv.v.x_i_nxv2i8(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
     i8 9,
@@ -430,7 +430,7 @@ define <vscale x 4 x i8> @intrinsic_vmv.v.x_i_nxv4i8(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
     i8 9,
@@ -444,7 +444,7 @@ define <vscale x 8 x i8> @intrinsic_vmv.v.x_i_nxv8i8(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
     i8 9,
@@ -458,7 +458,7 @@ define <vscale x 16 x i8> @intrinsic_vmv.v.x_i_nxv16i8(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
     i8 9,
@@ -472,7 +472,7 @@ define <vscale x 32 x i8> @intrinsic_vmv.v.x_i_nxv32i8(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
     i8 9,
@@ -486,7 +486,7 @@ define <vscale x 64 x i8> @intrinsic_vmv.v.x_i_nxv64i8(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
     i8 9,
@@ -500,7 +500,7 @@ define <vscale x 1 x i16> @intrinsic_vmv.v.x_i_nxv1i16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
     i16 9,
@@ -514,7 +514,7 @@ define <vscale x 2 x i16> @intrinsic_vmv.v.x_i_nxv2i16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
     i16 9,
@@ -528,7 +528,7 @@ define <vscale x 4 x i16> @intrinsic_vmv.v.x_i_nxv4i16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
     i16 9,
@@ -542,7 +542,7 @@ define <vscale x 8 x i16> @intrinsic_vmv.v.x_i_nxv8i16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
     i16 9,
@@ -556,7 +556,7 @@ define <vscale x 16 x i16> @intrinsic_vmv.v.x_i_nxv16i16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
     i16 9,
@@ -570,7 +570,7 @@ define <vscale x 32 x i16> @intrinsic_vmv.v.x_i_nxv32i16(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
     i16 9,
@@ -584,7 +584,7 @@ define <vscale x 1 x i32> @intrinsic_vmv.v.x_i_nxv1i32(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
     i32 9,
@@ -598,7 +598,7 @@ define <vscale x 2 x i32> @intrinsic_vmv.v.x_i_nxv2i32(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
     i32 9,
@@ -612,7 +612,7 @@ define <vscale x 4 x i32> @intrinsic_vmv.v.x_i_nxv4i32(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
     i32 9,
@@ -626,7 +626,7 @@ define <vscale x 8 x i32> @intrinsic_vmv.v.x_i_nxv8i32(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
     i32 9,
@@ -640,7 +640,7 @@ define <vscale x 16 x i32> @intrinsic_vmv.v.x_i_nxv16i32(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
     i32 9,
@@ -654,7 +654,7 @@ define <vscale x 1 x i64> @intrinsic_vmv.v.x_i_nxv1i64(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
     i64 9,
@@ -668,7 +668,7 @@ define <vscale x 2 x i64> @intrinsic_vmv.v.x_i_nxv2i64(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
     i64 9,
@@ -682,7 +682,7 @@ define <vscale x 4 x i64> @intrinsic_vmv.v.x_i_nxv4i64(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
     i64 9,
@@ -696,7 +696,7 @@ define <vscale x 8 x i64> @intrinsic_vmv.v.x_i_nxv8i64(i64 %0) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
     i64 9,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll
index 7916e467f5e8..3a0de6b7c92f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmxnor_mm_nxv1i1(<vscale x 1 x i1> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmxnor_mm_nxv2i1(<vscale x 2 x i1> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmxnor_mm_nxv4i1(<vscale x 4 x i1> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmxnor_mm_nxv8i1(<vscale x 8 x i1> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmxnor_mm_nxv16i1(<vscale x 16 x i1> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmxnor_mm_nxv32i1(<vscale x 32 x i1> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmxnor_mm_nxv64i1(<vscale x 64 x i1> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll
index 4ced869f962f..025e1f2b79f4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmxnor_mm_nxv1i1(<vscale x 1 x i1> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmxnor_mm_nxv2i1(<vscale x 2 x i1> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmxnor_mm_nxv4i1(<vscale x 4 x i1> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmxnor_mm_nxv8i1(<vscale x 8 x i1> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmxnor_mm_nxv16i1(<vscale x 16 x i1> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmxnor_mm_nxv32i1(<vscale x 32 x i1> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmxnor_mm_nxv64i1(<vscale x 64 x i1> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll
index cffa7adad71e..8155da1bcb89 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmxor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmxor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmxor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmxor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmxor_mm_nxv16i1(<vscale x 16 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmxor_mm_nxv32i1(<vscale x 32 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmxor_mm_nxv64i1(<vscale x 64 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll
index e4c8895259de..7c9b7040ffad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i1> @intrinsic_vmxor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define <vscale x 2 x i1> @intrinsic_vmxor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@ define <vscale x 4 x i1> @intrinsic_vmxor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@ define <vscale x 8 x i1> @intrinsic_vmxor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@ define <vscale x 16 x i1> @intrinsic_vmxor_mm_nxv16i1(<vscale x 16 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@ define <vscale x 32 x i1> @intrinsic_vmxor_mm_nxv32i1(<vscale x 32 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@ define <vscale x 64 x i1> @intrinsic_vmxor_mm_nxv64i1(<vscale x 64 x i1> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
index 31af2f81434f..edd37e451450 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_vx_nxv1i8_nxv1i16(<vscale x 1 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_vx_nxv2i8_nxv2i16(<vscale x 2 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_vx_nxv4i8_nxv4i16(<vscale x 4 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_vx_nxv8i8_nxv8i16(<vscale x 8 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_vx_nxv16i8_nxv16i16(<vscale x 16 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_vx_nxv32i8_nxv32i16(<vscale x 32 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_vx_nxv1i16_nxv1i32(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_vx_nxv2i16_nxv2i32(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_vx_nxv4i16_nxv4i32(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_vx_nxv8i16_nxv8i32(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_vx_nxv16i16_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_vx_nxv1i32_nxv1i64(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_vx_nxv2i32_nxv2i64(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_vx_nxv4i32_nxv4i64(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_vx_nxv8i32_nxv8i64(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1372,7 +1372,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1390,7 +1390,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1405,7 +1405,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1456,7 +1456,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1471,7 +1471,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1504,7 +1504,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1522,7 +1522,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1537,7 +1537,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1570,7 +1570,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1588,7 +1588,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1603,7 +1603,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1636,7 +1636,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1654,7 +1654,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1702,7 +1702,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1720,7 +1720,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1735,7 +1735,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1768,7 +1768,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1786,7 +1786,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1801,7 +1801,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1834,7 +1834,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
index 9a9644ea46c9..1972f876ade6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_vx_nxv1i8_nxv1i16(<vscale x 1 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_vx_nxv2i8_nxv2i16(<vscale x 2 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_vx_nxv4i8_nxv4i16(<vscale x 4 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_vx_nxv8i8_nxv8i16(<vscale x 8 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_vx_nxv16i8_nxv16i16(<vscale x 16 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_vx_nxv32i8_nxv32i16(<vscale x 32 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_vx_nxv1i16_nxv1i32(<vscale x 1 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_vx_nxv2i16_nxv2i32(<vscale x 2 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_vx_nxv4i16_nxv4i32(<vscale x 4 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_vx_nxv8i16_nxv8i32(<vscale x 8 x i32
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_vx_nxv16i16_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_vx_nxv1i32_nxv1i64(<vscale x 1 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_vx_nxv2i32_nxv2i64(<vscale x 2 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_vx_nxv4i32_nxv4i64(<vscale x 4 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_vx_nxv8i32_nxv8i64(<vscale x 8 x i64
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1372,7 +1372,7 @@ define <vscale x 1 x i8> @intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1390,7 +1390,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1405,7 +1405,7 @@ define <vscale x 2 x i8> @intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i8> @intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1456,7 +1456,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1471,7 +1471,7 @@ define <vscale x 8 x i8> @intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1504,7 +1504,7 @@ define <vscale x 16 x i8> @intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1522,7 +1522,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1537,7 +1537,7 @@ define <vscale x 32 x i8> @intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1570,7 +1570,7 @@ define <vscale x 1 x i16> @intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1588,7 +1588,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1603,7 +1603,7 @@ define <vscale x 2 x i16> @intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1636,7 +1636,7 @@ define <vscale x 4 x i16> @intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1654,7 +1654,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i16> @intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1702,7 +1702,7 @@ define <vscale x 16 x i16> @intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1720,7 +1720,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1735,7 +1735,7 @@ define <vscale x 1 x i32> @intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1768,7 +1768,7 @@ define <vscale x 2 x i32> @intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1786,7 +1786,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1801,7 +1801,7 @@ define <vscale x 4 x i32> @intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1834,7 +1834,7 @@ define <vscale x 8 x i32> @intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
index d582ba8ada92..fd8e9e2ced59 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_vx_nxv1i8_nxv1i16(<vscale x 1 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_vx_nxv2i8_nxv2i16(<vscale x 2 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_vx_nxv4i8_nxv4i16(<vscale x 4 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_vx_nxv8i8_nxv8i16(<vscale x 8 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_vx_nxv16i8_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_vx_nxv32i8_nxv32i16(<vscale x 32 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_vx_nxv1i16_nxv1i32(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_vx_nxv2i16_nxv2i32(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_vx_nxv4i16_nxv4i32(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_vx_nxv8i16_nxv8i32(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_vx_nxv16i16_nxv16i32(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_vx_nxv1i32_nxv1i64(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_vx_nxv2i32_nxv2i64(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_vx_nxv4i32_nxv4i64(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_vx_nxv8i32_nxv8i64(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1372,7 +1372,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1390,7 +1390,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1405,7 +1405,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1456,7 +1456,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1471,7 +1471,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1504,7 +1504,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1522,7 +1522,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1537,7 +1537,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1570,7 +1570,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1588,7 +1588,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1603,7 +1603,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1636,7 +1636,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1654,7 +1654,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1702,7 +1702,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1720,7 +1720,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1735,7 +1735,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1768,7 +1768,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1786,7 +1786,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1801,7 +1801,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1834,7 +1834,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
index fd21d8b90bc2..5c666224bd7e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_vx_nxv1i8_nxv1i16(<vscale x 1 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_vx_nxv2i8_nxv2i16(<vscale x 2 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_vx_nxv4i8_nxv4i16(<vscale x 4 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_vx_nxv8i8_nxv8i16(<vscale x 8 x i16>
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_vx_nxv16i8_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_vx_nxv32i8_nxv32i16(<vscale x 32 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_vx_nxv1i16_nxv1i32(<vscale x 1 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_vx_nxv2i16_nxv2i32(<vscale x 2 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_vx_nxv4i16_nxv4i32(<vscale x 4 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_vx_nxv8i16_nxv8i32(<vscale x 8 x i3
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_vx_nxv16i16_nxv16i32(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_vx_nxv1i32_nxv1i64(<vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_vx_nxv2i32_nxv2i64(<vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_vx_nxv4i32_nxv4i64(<vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_vx_nxv8i32_nxv8i64(<vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1372,7 +1372,7 @@ define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1390,7 +1390,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1405,7 +1405,7 @@ define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1456,7 +1456,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1471,7 +1471,7 @@ define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1504,7 +1504,7 @@ define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1522,7 +1522,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1537,7 +1537,7 @@ define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1570,7 +1570,7 @@ define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1588,7 +1588,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1603,7 +1603,7 @@ define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1636,7 +1636,7 @@ define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1654,7 +1654,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1702,7 +1702,7 @@ define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1720,7 +1720,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1735,7 +1735,7 @@ define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1768,7 +1768,7 @@ define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1786,7 +1786,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1801,7 +1801,7 @@ define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1834,7 +1834,7 @@ define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
index 5b307cadb7b3..c846217b22c4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8>  @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i8>  @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x i8>  @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x i8>  @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i8>  @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i8>  @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8>  @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8>  @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x i8>  @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x i8>  @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x i8>  @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x i8>  @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x i16>  @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x i16>  @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i16>  @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i16>  @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16>  @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16>  @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x i16>  @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x i16>  @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x i16>  @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i16>  @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x i32>  @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x i32>  @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32>  @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32>  @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i32>  @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x i32>  @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x i32>  @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x i32>  @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i64>  @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i64>  @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i64>  @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i64>  @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -794,7 +794,7 @@ define <vscale x 4 x i64>  @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 4 x i64>  @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i8>  @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i8> @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -886,7 +886,7 @@ define <vscale x 2 x i8>  @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -909,7 +909,7 @@ define <vscale x 2 x i8> @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -932,7 +932,7 @@ define <vscale x 4 x i8>  @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@ define <vscale x 4 x i8> @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -978,7 +978,7 @@ define <vscale x 8 x i8>  @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 8 x i8> @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 16 x i8>  @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 16 x i8> @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 32 x i8>  @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 32 x i8> @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1116,7 +1116,7 @@ define <vscale x 1 x i16>  @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1139,7 +1139,7 @@ define <vscale x 1 x i16> @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1162,7 +1162,7 @@ define <vscale x 2 x i16>  @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 2 x i16> @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 4 x i16>  @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 4 x i16> @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 8 x i16>  @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 8 x i16> @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1300,7 +1300,7 @@ define <vscale x 16 x i16>  @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 16 x i16> @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1346,7 +1346,7 @@ define <vscale x 1 x i32>  @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1369,7 +1369,7 @@ define <vscale x 1 x i32> @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1392,7 +1392,7 @@ define <vscale x 2 x i32>  @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1415,7 +1415,7 @@ define <vscale x 2 x i32> @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i32>  @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1461,7 +1461,7 @@ define <vscale x 4 x i32> @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1484,7 +1484,7 @@ define <vscale x 8 x i32>  @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1507,7 +1507,7 @@ define <vscale x 8 x i32> @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1537,7 +1537,7 @@ define <vscale x 1 x i64>  @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v25, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1567,7 +1567,7 @@ define <vscale x 1 x i64> @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v25, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1597,7 +1597,7 @@ define <vscale x 2 x i64>  @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v26, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1627,7 +1627,7 @@ define <vscale x 2 x i64> @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v26, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1657,7 +1657,7 @@ define <vscale x 4 x i64>  @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v28, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 4 x i64> @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v28, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
index 5a37fdba349c..fc9d9067958e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8>  @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i8>  @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x i8>  @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x i8>  @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i8>  @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i8>  @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8>  @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8>  @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x i8>  @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x i8>  @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x i8>  @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x i8>  @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x i16>  @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x i16>  @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i16>  @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i16>  @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16>  @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16>  @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x i16>  @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x i16>  @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x i16>  @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i16>  @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x i32>  @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x i32>  @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32>  @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32>  @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i32>  @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x i32>  @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x i32>  @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x i32>  @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i64>  @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i64>  @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i64>  @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i64>  @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -794,7 +794,7 @@ define <vscale x 4 x i64>  @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 4 x i64>  @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i8>  @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i8> @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -886,7 +886,7 @@ define <vscale x 2 x i8>  @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -909,7 +909,7 @@ define <vscale x 2 x i8> @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -932,7 +932,7 @@ define <vscale x 4 x i8>  @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@ define <vscale x 4 x i8> @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -978,7 +978,7 @@ define <vscale x 8 x i8>  @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 8 x i8> @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 16 x i8>  @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 16 x i8> @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 32 x i8>  @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 32 x i8> @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1116,7 +1116,7 @@ define <vscale x 1 x i16>  @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1139,7 +1139,7 @@ define <vscale x 1 x i16> @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1162,7 +1162,7 @@ define <vscale x 2 x i16>  @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 2 x i16> @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 4 x i16>  @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 4 x i16> @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 8 x i16>  @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 8 x i16> @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1300,7 +1300,7 @@ define <vscale x 16 x i16>  @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 16 x i16> @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1346,7 +1346,7 @@ define <vscale x 1 x i32>  @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1369,7 +1369,7 @@ define <vscale x 1 x i32> @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1392,7 +1392,7 @@ define <vscale x 2 x i32>  @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1415,7 +1415,7 @@ define <vscale x 2 x i32> @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i32>  @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1461,7 +1461,7 @@ define <vscale x 4 x i32> @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1484,7 +1484,7 @@ define <vscale x 8 x i32>  @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1507,7 +1507,7 @@ define <vscale x 8 x i32> @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1530,7 +1530,7 @@ define <vscale x 1 x i64>  @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1553,7 +1553,7 @@ define <vscale x 1 x i64> @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1576,7 +1576,7 @@ define <vscale x 2 x i64>  @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i64> @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1622,7 +1622,7 @@ define <vscale x 4 x i64>  @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1645,7 +1645,7 @@ define <vscale x 4 x i64> @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
index d19137df9130..4cad98ea7efc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8>  @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i8>  @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x i8>  @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x i8>  @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i8>  @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i8>  @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8>  @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8>  @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x i8>  @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x i8>  @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x i8>  @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x i8>  @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x i16>  @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x i16>  @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i16>  @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i16>  @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16>  @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16>  @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x i16>  @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x i16>  @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x i16>  @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i16>  @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x i32>  @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x i32>  @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32>  @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32>  @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i32>  @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x i32>  @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x i32>  @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x i32>  @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i64>  @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i64>  @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i64>  @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i64>  @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -794,7 +794,7 @@ define <vscale x 4 x i64>  @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 4 x i64>  @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i8>  @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i8> @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -886,7 +886,7 @@ define <vscale x 2 x i8>  @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -909,7 +909,7 @@ define <vscale x 2 x i8> @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -932,7 +932,7 @@ define <vscale x 4 x i8>  @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@ define <vscale x 4 x i8> @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -978,7 +978,7 @@ define <vscale x 8 x i8>  @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 8 x i8> @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 16 x i8>  @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 16 x i8> @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 32 x i8>  @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 32 x i8> @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1116,7 +1116,7 @@ define <vscale x 1 x i16>  @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1139,7 +1139,7 @@ define <vscale x 1 x i16> @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1162,7 +1162,7 @@ define <vscale x 2 x i16>  @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 2 x i16> @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 4 x i16>  @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 4 x i16> @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 8 x i16>  @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 8 x i16> @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1300,7 +1300,7 @@ define <vscale x 16 x i16>  @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 16 x i16> @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1346,7 +1346,7 @@ define <vscale x 1 x i32>  @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1369,7 +1369,7 @@ define <vscale x 1 x i32> @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1392,7 +1392,7 @@ define <vscale x 2 x i32>  @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1415,7 +1415,7 @@ define <vscale x 2 x i32> @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i32>  @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1461,7 +1461,7 @@ define <vscale x 4 x i32> @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1484,7 +1484,7 @@ define <vscale x 8 x i32>  @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1507,7 +1507,7 @@ define <vscale x 8 x i32> @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1537,7 +1537,7 @@ define <vscale x 1 x i64>  @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v25, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1567,7 +1567,7 @@ define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v25, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1597,7 +1597,7 @@ define <vscale x 2 x i64>  @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v26, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1627,7 +1627,7 @@ define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v26, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1657,7 +1657,7 @@ define <vscale x 4 x i64>  @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v28, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v28, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
index 596092e9b48a..6bd5924e5369 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8>  @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i8>  @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x i8>  @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x i8>  @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i8>  @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i8>  @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8>  @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8>  @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x i8>  @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x i8>  @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x i8>  @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x i8>  @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x i16>  @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x i16>  @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i16>  @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i16>  @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16>  @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16>  @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x i16>  @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x i16>  @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x i16>  @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i16>  @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x i32>  @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x i32>  @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32>  @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32>  @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i32>  @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x i32>  @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x i32>  @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x i32>  @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i64>  @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i64>  @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i64>  @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i64>  @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -794,7 +794,7 @@ define <vscale x 4 x i64>  @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 4 x i64>  @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i8>  @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i8> @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -886,7 +886,7 @@ define <vscale x 2 x i8>  @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -909,7 +909,7 @@ define <vscale x 2 x i8> @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -932,7 +932,7 @@ define <vscale x 4 x i8>  @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@ define <vscale x 4 x i8> @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -978,7 +978,7 @@ define <vscale x 8 x i8>  @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 8 x i8> @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 16 x i8>  @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 16 x i8> @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 32 x i8>  @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 32 x i8> @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1116,7 +1116,7 @@ define <vscale x 1 x i16>  @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1139,7 +1139,7 @@ define <vscale x 1 x i16> @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1162,7 +1162,7 @@ define <vscale x 2 x i16>  @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 2 x i16> @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 4 x i16>  @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 4 x i16> @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 8 x i16>  @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 8 x i16> @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1300,7 +1300,7 @@ define <vscale x 16 x i16>  @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 16 x i16> @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1346,7 +1346,7 @@ define <vscale x 1 x i32>  @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1369,7 +1369,7 @@ define <vscale x 1 x i32> @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1392,7 +1392,7 @@ define <vscale x 2 x i32>  @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1415,7 +1415,7 @@ define <vscale x 2 x i32> @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i32>  @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1461,7 +1461,7 @@ define <vscale x 4 x i32> @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1484,7 +1484,7 @@ define <vscale x 8 x i32>  @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1507,7 +1507,7 @@ define <vscale x 8 x i32> @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1530,7 +1530,7 @@ define <vscale x 1 x i64>  @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1553,7 +1553,7 @@ define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1576,7 +1576,7 @@ define <vscale x 2 x i64>  @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1622,7 +1622,7 @@ define <vscale x 4 x i64>  @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1645,7 +1645,7 @@ define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
index 71272f237bce..33ff81121b52 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_vx_nxv16i8_nxv16i16(<vscale x 16 x i1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_vx_nxv32i8_nxv32i16(<vscale x 32 x i1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_vx_nxv1i16_nxv1i32(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_vx_nxv2i16_nxv2i32(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_vx_nxv4i16_nxv4i32(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_vx_nxv8i16_nxv8i32(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_vx_nxv16i16_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_vx_nxv1i32_nxv1i64(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_vx_nxv2i32_nxv2i64(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_vx_nxv4i32_nxv4i64(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_vx_nxv8i32_nxv8i64(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1372,7 +1372,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1390,7 +1390,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1405,7 +1405,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1456,7 +1456,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1471,7 +1471,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1504,7 +1504,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_mask_vi_nxv16i8_nxv16i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1522,7 +1522,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1537,7 +1537,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_mask_vi_nxv32i8_nxv32i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1570,7 +1570,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_mask_vi_nxv1i16_nxv1i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1588,7 +1588,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1603,7 +1603,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_mask_vi_nxv2i16_nxv2i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1636,7 +1636,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_mask_vi_nxv4i16_nxv4i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1654,7 +1654,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_mask_vi_nxv8i16_nxv8i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1702,7 +1702,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_mask_vi_nxv16i16_nxv16i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1720,7 +1720,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1735,7 +1735,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_mask_vi_nxv1i32_nxv1i64_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1768,7 +1768,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_mask_vi_nxv2i32_nxv2i64_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1786,7 +1786,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1801,7 +1801,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_mask_vi_nxv4i32_nxv4i64_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1834,7 +1834,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_mask_vi_nxv8i32_nxv8i64_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
index 30f11f8f7cc2..94665ff95b17 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_vx_nxv16i8_nxv16i16(<vscale x 16 x i1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_vx_nxv32i8_nxv32i16(<vscale x 32 x i1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_vx_nxv1i16_nxv1i32(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_vx_nxv2i16_nxv2i32(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_vx_nxv4i16_nxv4i32(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_vx_nxv8i16_nxv8i32(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_vx_nxv16i16_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_vx_nxv1i32_nxv1i64(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_vx_nxv2i32_nxv2i64(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_vx_nxv4i32_nxv4i64(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_vx_nxv8i32_nxv8i64(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1372,7 +1372,7 @@ define <vscale x 1 x i8> @intrinsic_vnsra_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1390,7 +1390,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1405,7 +1405,7 @@ define <vscale x 2 x i8> @intrinsic_vnsra_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i8> @intrinsic_vnsra_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1456,7 +1456,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1471,7 +1471,7 @@ define <vscale x 8 x i8> @intrinsic_vnsra_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1504,7 +1504,7 @@ define <vscale x 16 x i8> @intrinsic_vnsra_mask_vi_nxv16i8_nxv16i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1522,7 +1522,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1537,7 +1537,7 @@ define <vscale x 32 x i8> @intrinsic_vnsra_mask_vi_nxv32i8_nxv32i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1570,7 +1570,7 @@ define <vscale x 1 x i16> @intrinsic_vnsra_mask_vi_nxv1i16_nxv1i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1588,7 +1588,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1603,7 +1603,7 @@ define <vscale x 2 x i16> @intrinsic_vnsra_mask_vi_nxv2i16_nxv2i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1636,7 +1636,7 @@ define <vscale x 4 x i16> @intrinsic_vnsra_mask_vi_nxv4i16_nxv4i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1654,7 +1654,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i16> @intrinsic_vnsra_mask_vi_nxv8i16_nxv8i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1702,7 +1702,7 @@ define <vscale x 16 x i16> @intrinsic_vnsra_mask_vi_nxv16i16_nxv16i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1720,7 +1720,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1735,7 +1735,7 @@ define <vscale x 1 x i32> @intrinsic_vnsra_mask_vi_nxv1i32_nxv1i64_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1768,7 +1768,7 @@ define <vscale x 2 x i32> @intrinsic_vnsra_mask_vi_nxv2i32_nxv2i64_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1786,7 +1786,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1801,7 +1801,7 @@ define <vscale x 4 x i32> @intrinsic_vnsra_mask_vi_nxv4i32_nxv4i64_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1834,7 +1834,7 @@ define <vscale x 8 x i32> @intrinsic_vnsra_mask_vi_nxv8i32_nxv8i64_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
index 635c0b2ec904..2840e7e1c0b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_vx_nxv16i8_nxv16i16(<vscale x 16 x i1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_vx_nxv32i8_nxv32i16(<vscale x 32 x i1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_vx_nxv1i16_nxv1i32(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_vx_nxv2i16_nxv2i32(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_vx_nxv4i16_nxv4i32(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_vx_nxv8i16_nxv8i32(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_vx_nxv16i16_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_vx_nxv1i32_nxv1i64(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_vx_nxv2i32_nxv2i64(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_vx_nxv4i32_nxv4i64(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_vx_nxv8i32_nxv8i64(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1372,7 +1372,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1390,7 +1390,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1405,7 +1405,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1456,7 +1456,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1471,7 +1471,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1504,7 +1504,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1522,7 +1522,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1537,7 +1537,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1570,7 +1570,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1588,7 +1588,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1603,7 +1603,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1636,7 +1636,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1654,7 +1654,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1702,7 +1702,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1720,7 +1720,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1735,7 +1735,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1768,7 +1768,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1786,7 +1786,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1801,7 +1801,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1834,7 +1834,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
index d049ca6c59b3..55b4f93772dc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_vx_nxv16i8_nxv16i16(<vscale x 16 x i1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_vx_nxv32i8_nxv32i16(<vscale x 32 x i1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_vx_nxv1i16_nxv1i32(<vscale x 1 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_vx_nxv2i16_nxv2i32(<vscale x 2 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_vx_nxv4i16_nxv4i32(<vscale x 4 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_vx_nxv8i16_nxv8i32(<vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_vx_nxv16i16_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_vx_nxv1i32_nxv1i64(<vscale x 1 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_vx_nxv2i32_nxv2i64(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_vx_nxv4i32_nxv4i64(<vscale x 4 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_vx_nxv8i32_nxv8i64(<vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1372,7 +1372,7 @@ define <vscale x 1 x i8> @intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1390,7 +1390,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1405,7 +1405,7 @@ define <vscale x 2 x i8> @intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i8> @intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1456,7 +1456,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1471,7 +1471,7 @@ define <vscale x 8 x i8> @intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1504,7 +1504,7 @@ define <vscale x 16 x i8> @intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1522,7 +1522,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1537,7 +1537,7 @@ define <vscale x 32 x i8> @intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1570,7 +1570,7 @@ define <vscale x 1 x i16> @intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1588,7 +1588,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1603,7 +1603,7 @@ define <vscale x 2 x i16> @intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1636,7 +1636,7 @@ define <vscale x 4 x i16> @intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1654,7 +1654,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1669,7 +1669,7 @@ define <vscale x 8 x i16> @intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1702,7 +1702,7 @@ define <vscale x 16 x i16> @intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1720,7 +1720,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1735,7 +1735,7 @@ define <vscale x 1 x i32> @intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1768,7 +1768,7 @@ define <vscale x 2 x i32> @intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1786,7 +1786,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1801,7 +1801,7 @@ define <vscale x 4 x i32> @intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1834,7 +1834,7 @@ define <vscale x 8 x i32> @intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
index fc0d3b508467..a45736c9dcbd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vs
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vs
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vor_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vor_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vor_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i6
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vor.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i6
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vor.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i6
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vor.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i6
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vor.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1998,7 +1998,7 @@ define <vscale x 1 x i8> @intrinsic_vor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2013,7 +2013,7 @@ define <vscale x 1 x i8> @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2030,7 +2030,7 @@ define <vscale x 2 x i8> @intrinsic_vor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2045,7 +2045,7 @@ define <vscale x 2 x i8> @intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2062,7 +2062,7 @@ define <vscale x 4 x i8> @intrinsic_vor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2077,7 +2077,7 @@ define <vscale x 4 x i8> @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2094,7 +2094,7 @@ define <vscale x 8 x i8> @intrinsic_vor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2109,7 +2109,7 @@ define <vscale x 8 x i8> @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2126,7 +2126,7 @@ define <vscale x 16 x i8> @intrinsic_vor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2141,7 +2141,7 @@ define <vscale x 16 x i8> @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2158,7 +2158,7 @@ define <vscale x 32 x i8> @intrinsic_vor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2173,7 +2173,7 @@ define <vscale x 32 x i8> @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2190,7 +2190,7 @@ define <vscale x 64 x i8> @intrinsic_vor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2205,7 +2205,7 @@ define <vscale x 64 x i8> @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2222,7 +2222,7 @@ define <vscale x 1 x i16> @intrinsic_vor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2237,7 +2237,7 @@ define <vscale x 1 x i16> @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2254,7 +2254,7 @@ define <vscale x 2 x i16> @intrinsic_vor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2269,7 +2269,7 @@ define <vscale x 2 x i16> @intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2286,7 +2286,7 @@ define <vscale x 4 x i16> @intrinsic_vor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2301,7 +2301,7 @@ define <vscale x 4 x i16> @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2318,7 +2318,7 @@ define <vscale x 8 x i16> @intrinsic_vor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2333,7 +2333,7 @@ define <vscale x 8 x i16> @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2350,7 +2350,7 @@ define <vscale x 16 x i16> @intrinsic_vor_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2365,7 +2365,7 @@ define <vscale x 16 x i16> @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2382,7 +2382,7 @@ define <vscale x 32 x i16> @intrinsic_vor_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2397,7 +2397,7 @@ define <vscale x 32 x i16> @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2414,7 +2414,7 @@ define <vscale x 1 x i32> @intrinsic_vor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2429,7 +2429,7 @@ define <vscale x 1 x i32> @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2446,7 +2446,7 @@ define <vscale x 2 x i32> @intrinsic_vor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2461,7 +2461,7 @@ define <vscale x 2 x i32> @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2478,7 +2478,7 @@ define <vscale x 4 x i32> @intrinsic_vor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2493,7 +2493,7 @@ define <vscale x 4 x i32> @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2510,7 +2510,7 @@ define <vscale x 8 x i32> @intrinsic_vor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2525,7 +2525,7 @@ define <vscale x 8 x i32> @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2542,7 +2542,7 @@ define <vscale x 16 x i32> @intrinsic_vor_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2557,7 +2557,7 @@ define <vscale x 16 x i32> @intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2574,7 +2574,7 @@ define <vscale x 1 x i64> @intrinsic_vor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2589,7 +2589,7 @@ define <vscale x 1 x i64> @intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2606,7 +2606,7 @@ define <vscale x 2 x i64> @intrinsic_vor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2621,7 +2621,7 @@ define <vscale x 2 x i64> @intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2638,7 +2638,7 @@ define <vscale x 4 x i64> @intrinsic_vor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2653,7 +2653,7 @@ define <vscale x 4 x i64> @intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2670,7 +2670,7 @@ define <vscale x 8 x i64> @intrinsic_vor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2685,7 +2685,7 @@ define <vscale x 8 x i64> @intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

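The per-instruction churn above is purely a matter of which spelling the asm printer chooses: with --riscv-no-aliases gone, llc prints the standard aliases, so the return instruction and the whole-register vector loads render as shown below (illustrative listing only, not part of the patch; the regenerated CHECK lines come from utils/update_llc_test_checks.py as noted in each test):

    jalr zero, 0(ra)    # canonical form kept by --riscv-no-aliases
    ret                 # standard alias, printed by default

    vl8re8.v v24, (a0)  # explicit element-width whole-register load
    vl8r.v   v24, (a0)  # its alias, printed by default
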
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
index 980191a1197a..8b4a3d496b74 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vs
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vs
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vor_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vor_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vor_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 1 x i8> @intrinsic_vor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 1 x i8> @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@ define <vscale x 2 x i8> @intrinsic_vor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 2 x i8> @intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x i8> @intrinsic_vor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 4 x i8> @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@ define <vscale x 8 x i8> @intrinsic_vor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 8 x i8> @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@ define <vscale x 16 x i8> @intrinsic_vor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 16 x i8> @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 32 x i8> @intrinsic_vor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 32 x i8> @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 64 x i8> @intrinsic_vor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 64 x i8> @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@ define <vscale x 1 x i16> @intrinsic_vor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i16> @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@ define <vscale x 2 x i16> @intrinsic_vor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i16> @intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@ define <vscale x 4 x i16> @intrinsic_vor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i16> @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@ define <vscale x 8 x i16> @intrinsic_vor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i16> @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i16> @intrinsic_vor_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i16> @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 32 x i16> @intrinsic_vor_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@ define <vscale x 32 x i16> @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@ define <vscale x 1 x i32> @intrinsic_vor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@ define <vscale x 1 x i32> @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@ define <vscale x 2 x i32> @intrinsic_vor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@ define <vscale x 2 x i32> @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x i32> @intrinsic_vor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@ define <vscale x 4 x i32> @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@ define <vscale x 8 x i32> @intrinsic_vor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@ define <vscale x 8 x i32> @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@ define <vscale x 16 x i32> @intrinsic_vor_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@ define <vscale x 16 x i32> @intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@ define <vscale x 1 x i64> @intrinsic_vor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@ define <vscale x 1 x i64> @intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@ define <vscale x 2 x i64> @intrinsic_vor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@ define <vscale x 2 x i64> @intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@ define <vscale x 4 x i64> @intrinsic_vor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@ define <vscale x 4 x i64> @intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i64> @intrinsic_vor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 8 x i64> @intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll
index 48200b3ced88..1350c34d4490 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare i32 @llvm.riscv.vpopc.i32.nxv1i1(
   <vscale x 1 x i1>,
   i32);
@@ -10,7 +10,7 @@ define i32 @intrinsic_vpopc_m_i32_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.i32.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define i32 @intrinsic_vpopc_mask_m_i32_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -50,7 +50,7 @@ define i32 @intrinsic_vpopc_m_i32_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.i32.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -71,7 +71,7 @@ define i32 @intrinsic_vpopc_mask_m_i32_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -90,7 +90,7 @@ define i32 @intrinsic_vpopc_m_i32_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.i32.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -111,7 +111,7 @@ define i32 @intrinsic_vpopc_mask_m_i32_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -130,7 +130,7 @@ define i32 @intrinsic_vpopc_m_i32_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.i32.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -151,7 +151,7 @@ define i32 @intrinsic_vpopc_mask_m_i32_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -170,7 +170,7 @@ define i32 @intrinsic_vpopc_m_i32_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwin
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.i32.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -191,7 +191,7 @@ define i32 @intrinsic_vpopc_mask_m_i32_nxv16i1(<vscale x 16 x i1> %0, <vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -210,7 +210,7 @@ define i32 @intrinsic_vpopc_m_i32_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwin
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.i32.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -231,7 +231,7 @@ define i32 @intrinsic_vpopc_mask_m_i32_nxv32i1(<vscale x 32 x i1> %0, <vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -250,7 +250,7 @@ define i32 @intrinsic_vpopc_m_i32_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwin
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.i32.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -271,7 +271,7 @@ define i32 @intrinsic_vpopc_mask_m_i32_nxv64i1(<vscale x 64 x i1> %0, <vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vpopc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vpopc-rv64.ll
index d33787c80220..6e0bae3ed50c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpopc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpopc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare i64 @llvm.riscv.vpopc.i64.nxv1i1(
   <vscale x 1 x i1>,
   i64);
@@ -10,7 +10,7 @@ define i64 @intrinsic_vpopc_m_i64_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.i64.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@ define i64 @intrinsic_vpopc_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -50,7 +50,7 @@ define i64 @intrinsic_vpopc_m_i64_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.i64.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -71,7 +71,7 @@ define i64 @intrinsic_vpopc_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -90,7 +90,7 @@ define i64 @intrinsic_vpopc_m_i64_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.i64.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -111,7 +111,7 @@ define i64 @intrinsic_vpopc_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -130,7 +130,7 @@ define i64 @intrinsic_vpopc_m_i64_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.i64.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -151,7 +151,7 @@ define i64 @intrinsic_vpopc_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -170,7 +170,7 @@ define i64 @intrinsic_vpopc_m_i64_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwin
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.i64.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -191,7 +191,7 @@ define i64 @intrinsic_vpopc_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -210,7 +210,7 @@ define i64 @intrinsic_vpopc_m_i64_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwin
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.i64.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -231,7 +231,7 @@ define i64 @intrinsic_vpopc_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -250,7 +250,7 @@ define i64 @intrinsic_vpopc_m_i64_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwin
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.i64.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -271,7 +271,7 @@ define i64 @intrinsic_vpopc_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv64i1(
     <vscale x 64 x i1> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
index 9f0020905365..d70f8560ceda 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@ define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
index 387bedd0275f..ca22b16f9cec 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@ define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
index 02ba0ab996da..6f02ceabbbae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@ define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
index fe56210fc875..43fad3269691 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@ define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
index b3e6656b729a..efe85ed0b224 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@ define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
index 034f63003694..9c2b918f4b8a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@ define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
index 866548f18090..cec011713649 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@ define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
index 127e6f5a59d7..acaf4e2dabe2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@ define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
index 3d142c38c949..d9038aa46420 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
index 89d14e986a56..f08fd12b1050 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
index 9c90c0e442b5..6d5873adb003 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@ define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
index 8fbaf45737cc..de3f34683cde 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@ define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
index 2fb704b2d630..99928f838a62 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
index 088f249f0c6c..3a92a1e9a937 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
index 59bc5cd0e401..282a3d96d7a3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
index aa32c7abcf7c..2c5d826c81dc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
index e416b736ca56..3222405df08a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vrem.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vrem.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vrem.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
index fbce5aa44e79..90e3c52bc444 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
index 3ef471718f3e..a26bcfb23c65 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vremu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vremu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vremu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
index 57f4952b12a3..42bf90f70d4e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
index fe9aa32b6b03..d6c1c8038872 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i32(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i32(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i32(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i32(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i32(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i32(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i32(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i32(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i32(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i32(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i32(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i32(
     <vscale x 64 x i8> %0,
@@ -302,10 +302,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i32(
 define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i32(
     <vscale x 64 x i8> %0,
@@ -328,7 +328,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i32(
     <vscale x 1 x i16> %0,
@@ -350,7 +350,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i32(
     <vscale x 1 x i16> %0,
@@ -373,7 +373,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i32(
     <vscale x 2 x i16> %0,
@@ -395,7 +395,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i32(
     <vscale x 2 x i16> %0,
@@ -418,7 +418,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i32(
     <vscale x 4 x i16> %0,
@@ -440,7 +440,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i32(
     <vscale x 4 x i16> %0,
@@ -463,7 +463,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i32(
     <vscale x 8 x i16> %0,
@@ -485,7 +485,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i32(
     <vscale x 8 x i16> %0,
@@ -508,7 +508,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i32(
     <vscale x 16 x i16> %0,
@@ -530,7 +530,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i32(
     <vscale x 16 x i16> %0,
@@ -553,7 +553,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i32(
     <vscale x 32 x i16> %0,
@@ -576,7 +576,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i1
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i32(
     <vscale x 32 x i16> %0,
@@ -599,7 +599,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -621,7 +621,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -644,7 +644,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -666,7 +666,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -689,7 +689,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -711,7 +711,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -734,7 +734,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -756,7 +756,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -779,7 +779,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -802,7 +802,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i3
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -825,7 +825,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i32(
     <vscale x 1 x half> %0,
@@ -847,7 +847,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.i32(
     <vscale x 1 x half> %0,
@@ -870,7 +870,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i32(
     <vscale x 2 x half> %0,
@@ -892,7 +892,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.i32(
     <vscale x 2 x half> %0,
@@ -915,7 +915,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i32(
     <vscale x 4 x half> %0,
@@ -937,7 +937,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.i32(
     <vscale x 4 x half> %0,
@@ -960,7 +960,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i32(
     <vscale x 8 x half> %0,
@@ -982,7 +982,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.i32(
     <vscale x 8 x half> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i32(
     <vscale x 16 x half> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.i32(
     <vscale x 16 x half> %0,
@@ -1050,7 +1050,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i32(
     <vscale x 32 x half> %0,
@@ -1073,7 +1073,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.i32(
     <vscale x 32 x half> %0,
@@ -1096,7 +1096,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i32(
     <vscale x 1 x float> %0,
@@ -1118,7 +1118,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i32(
     <vscale x 1 x float> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i32(
     <vscale x 2 x float> %0,
@@ -1163,7 +1163,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i32(
     <vscale x 2 x float> %0,
@@ -1186,7 +1186,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i32(
     <vscale x 4 x float> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i32(
     <vscale x 4 x float> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i32(
     <vscale x 8 x float> %0,
@@ -1253,7 +1253,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i32(
     <vscale x 8 x float> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32(<
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i32(
     <vscale x 16 x float> %0,
@@ -1299,7 +1299,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i32(
     <vscale x 16 x float> %0,
@@ -1322,7 +1322,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i32(
     <vscale x 1 x double> %0,
@@ -1344,7 +1344,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i32(
     <vscale x 1 x double> %0,
@@ -1367,7 +1367,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i32(
     <vscale x 2 x double> %0,
@@ -1389,7 +1389,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i32(
     <vscale x 2 x double> %0,
@@ -1412,7 +1412,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i32(
     <vscale x 4 x double> %0,
@@ -1434,7 +1434,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i32(
     <vscale x 4 x double> %0,
@@ -1457,7 +1457,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i32(
     <vscale x 8 x double> %0,
@@ -1480,7 +1480,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i32(
     <vscale x 8 x double> %0,
@@ -1503,7 +1503,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(
     <vscale x 1 x i8> %0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i32(
     <vscale x 1 x i8> %0,
@@ -1548,7 +1548,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(
     <vscale x 2 x i8> %0,
@@ -1570,7 +1570,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i32(
     <vscale x 2 x i8> %0,
@@ -1593,7 +1593,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(
     <vscale x 4 x i8> %0,
@@ -1615,7 +1615,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i32(
     <vscale x 4 x i8> %0,
@@ -1638,7 +1638,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(
     <vscale x 8 x i8> %0,
@@ -1660,7 +1660,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i32(
     <vscale x 8 x i8> %0,
@@ -1683,7 +1683,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(
     <vscale x 16 x i8> %0,
@@ -1705,7 +1705,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i32(
     <vscale x 16 x i8> %0,
@@ -1728,7 +1728,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i32(<vscale x 3
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(
     <vscale x 32 x i8> %0,
@@ -1750,7 +1750,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i32(
     <vscale x 32 x i8> %0,
@@ -1773,7 +1773,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i32(<vscale x 6
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(
     <vscale x 64 x i8> %0,
@@ -1795,7 +1795,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i32(
     <vscale x 64 x i8> %0,
@@ -1818,7 +1818,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(
     <vscale x 1 x i16> %0,
@@ -1840,7 +1840,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i32(
     <vscale x 1 x i16> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(
     <vscale x 2 x i16> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i32(
     <vscale x 2 x i16> %0,
@@ -1908,7 +1908,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(
     <vscale x 4 x i16> %0,
@@ -1930,7 +1930,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i32(
     <vscale x 4 x i16> %0,
@@ -1953,7 +1953,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(
     <vscale x 8 x i16> %0,
@@ -1975,7 +1975,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i32(
     <vscale x 8 x i16> %0,
@@ -1998,7 +1998,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(
     <vscale x 16 x i16> %0,
@@ -2020,7 +2020,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i32(
     <vscale x 16 x i16> %0,
@@ -2043,7 +2043,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(
     <vscale x 32 x i16> %0,
@@ -2065,7 +2065,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i32(
     <vscale x 32 x i16> %0,
@@ -2088,7 +2088,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2110,7 +2110,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2133,7 +2133,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2155,7 +2155,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2178,7 +2178,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2200,7 +2200,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2223,7 +2223,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2245,7 +2245,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2268,7 +2268,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2290,7 +2290,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i32(
     <vscale x 1 x half> %0,
@@ -2335,7 +2335,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i32(
     <vscale x 1 x half> %0,
@@ -2358,7 +2358,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i32(
     <vscale x 2 x half> %0,
@@ -2380,7 +2380,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i32(
     <vscale x 2 x half> %0,
@@ -2403,7 +2403,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i32(
     <vscale x 4 x half> %0,
@@ -2425,7 +2425,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i32(
     <vscale x 4 x half> %0,
@@ -2448,7 +2448,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i32(
     <vscale x 8 x half> %0,
@@ -2470,7 +2470,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i32(
     <vscale x 8 x half> %0,
@@ -2493,7 +2493,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i32(
     <vscale x 16 x half> %0,
@@ -2515,7 +2515,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i32(
     <vscale x 16 x half> %0,
@@ -2538,7 +2538,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i32(
     <vscale x 32 x half> %0,
@@ -2560,7 +2560,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i32(
     <vscale x 32 x half> %0,
@@ -2583,7 +2583,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i32(
     <vscale x 1 x float> %0,
@@ -2605,7 +2605,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i32(
     <vscale x 1 x float> %0,
@@ -2628,7 +2628,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i32(
     <vscale x 2 x float> %0,
@@ -2650,7 +2650,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i32(
     <vscale x 2 x float> %0,
@@ -2673,7 +2673,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i32(
     <vscale x 4 x float> %0,
@@ -2695,7 +2695,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i32(
     <vscale x 4 x float> %0,
@@ -2718,7 +2718,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i32(
     <vscale x 8 x float> %0,
@@ -2740,7 +2740,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i32(
     <vscale x 8 x float> %0,
@@ -2763,7 +2763,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i32(
     <vscale x 16 x float> %0,
@@ -2785,7 +2785,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i32(
     <vscale x 16 x float> %0,
@@ -2808,7 +2808,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vx_nxv1f64_nxv1f64_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i32(
     <vscale x 1 x double> %0,
@@ -2830,7 +2830,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i32(
     <vscale x 1 x double> %0,
@@ -2853,7 +2853,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vx_nxv2f64_nxv2f64_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i32(
     <vscale x 2 x double> %0,
@@ -2875,7 +2875,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i32(
     <vscale x 2 x double> %0,
@@ -2898,7 +2898,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vx_nxv4f64_nxv4f64_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i32(
     <vscale x 4 x double> %0,
@@ -2920,7 +2920,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i32(
     <vscale x 4 x double> %0,
@@ -2943,7 +2943,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vx_nxv8f64_nxv8f64_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i32(
     <vscale x 8 x double> %0,
@@ -2965,7 +2965,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i32(
     <vscale x 8 x double> %0,
@@ -2983,7 +2983,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(
     <vscale x 1 x i8> %0,
@@ -2998,7 +2998,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i32(
     <vscale x 1 x i8> %0,
@@ -3016,7 +3016,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(
     <vscale x 2 x i8> %0,
@@ -3031,7 +3031,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i32(
     <vscale x 2 x i8> %0,
@@ -3049,7 +3049,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(
     <vscale x 4 x i8> %0,
@@ -3064,7 +3064,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i32(
     <vscale x 4 x i8> %0,
@@ -3082,7 +3082,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(
     <vscale x 8 x i8> %0,
@@ -3097,7 +3097,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i32(
     <vscale x 8 x i8> %0,
@@ -3115,7 +3115,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(
     <vscale x 16 x i8> %0,
@@ -3130,7 +3130,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i32(
     <vscale x 16 x i8> %0,
@@ -3148,7 +3148,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i32(<vscale x 3
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(
     <vscale x 32 x i8> %0,
@@ -3163,7 +3163,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i32(
     <vscale x 32 x i8> %0,
@@ -3181,7 +3181,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i32(<vscale x 6
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(
     <vscale x 64 x i8> %0,
@@ -3196,7 +3196,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i32(
     <vscale x 64 x i8> %0,
@@ -3214,7 +3214,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(
     <vscale x 1 x i16> %0,
@@ -3229,7 +3229,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i32(
     <vscale x 1 x i16> %0,
@@ -3247,7 +3247,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(
     <vscale x 2 x i16> %0,
@@ -3262,7 +3262,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i32(
     <vscale x 2 x i16> %0,
@@ -3280,7 +3280,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(
     <vscale x 4 x i16> %0,
@@ -3295,7 +3295,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i32(
     <vscale x 4 x i16> %0,
@@ -3313,7 +3313,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(
     <vscale x 8 x i16> %0,
@@ -3328,7 +3328,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i32(
     <vscale x 8 x i16> %0,
@@ -3346,7 +3346,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(
     <vscale x 16 x i16> %0,
@@ -3361,7 +3361,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i32(
     <vscale x 16 x i16> %0,
@@ -3379,7 +3379,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(
     <vscale x 32 x i16> %0,
@@ -3394,7 +3394,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i32(
     <vscale x 32 x i16> %0,
@@ -3412,7 +3412,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -3427,7 +3427,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -3445,7 +3445,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -3460,7 +3460,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -3478,7 +3478,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -3493,7 +3493,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -3511,7 +3511,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -3526,7 +3526,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -3544,7 +3544,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -3559,7 +3559,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -3577,7 +3577,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i32(
     <vscale x 1 x half> %0,
@@ -3592,7 +3592,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i32(
     <vscale x 1 x half> %0,
@@ -3610,7 +3610,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i32(
     <vscale x 2 x half> %0,
@@ -3625,7 +3625,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i32(
     <vscale x 2 x half> %0,
@@ -3643,7 +3643,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i32(
     <vscale x 4 x half> %0,
@@ -3658,7 +3658,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i32(
     <vscale x 4 x half> %0,
@@ -3676,7 +3676,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i32(
     <vscale x 8 x half> %0,
@@ -3691,7 +3691,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i32(
     <vscale x 8 x half> %0,
@@ -3709,7 +3709,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i32(
     <vscale x 16 x half> %0,
@@ -3724,7 +3724,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i32(
     <vscale x 16 x half> %0,
@@ -3742,7 +3742,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i32(
     <vscale x 32 x half> %0,
@@ -3757,7 +3757,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i32(
     <vscale x 32 x half> %0,
@@ -3775,7 +3775,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i32(
     <vscale x 1 x float> %0,
@@ -3790,7 +3790,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i32(
     <vscale x 1 x float> %0,
@@ -3808,7 +3808,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i32(
     <vscale x 2 x float> %0,
@@ -3823,7 +3823,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i32(
     <vscale x 2 x float> %0,
@@ -3841,7 +3841,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i32(
     <vscale x 4 x float> %0,
@@ -3856,7 +3856,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i32(
     <vscale x 4 x float> %0,
@@ -3874,7 +3874,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i32(
     <vscale x 8 x float> %0,
@@ -3889,7 +3889,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i32(
     <vscale x 8 x float> %0,
@@ -3907,7 +3907,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i32(
     <vscale x 16 x float> %0,
@@ -3922,7 +3922,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i32(
     <vscale x 16 x float> %0,
@@ -3940,7 +3940,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vi_nxv1f64_nxv1f64_i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i32(
     <vscale x 1 x double> %0,
@@ -3955,7 +3955,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i32(
     <vscale x 1 x double> %0,
@@ -3973,7 +3973,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vi_nxv2f64_nxv2f64_i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i32(
     <vscale x 2 x double> %0,
@@ -3988,7 +3988,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i32(
     <vscale x 2 x double> %0,
@@ -4006,7 +4006,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vi_nxv4f64_nxv4f64_i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i32(
     <vscale x 4 x double> %0,
@@ -4021,7 +4021,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i32(
     <vscale x 4 x double> %0,
@@ -4039,7 +4039,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vi_nxv8f64_nxv8f64_i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i32(
     <vscale x 8 x double> %0,
@@ -4054,7 +4054,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i32(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
index 963f795c0dc9..e28ce70d3efb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i64(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i64(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i64(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i64(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i64(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i64(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(
     <vscale x 64 x i8> %0,
@@ -302,10 +302,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i64(
 define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i64(
     <vscale x 64 x i8> %0,
@@ -328,7 +328,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(
     <vscale x 1 x i16> %0,
@@ -350,7 +350,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i64(
     <vscale x 1 x i16> %0,
@@ -373,7 +373,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(
     <vscale x 2 x i16> %0,
@@ -395,7 +395,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i64(
     <vscale x 2 x i16> %0,
@@ -418,7 +418,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(
     <vscale x 4 x i16> %0,
@@ -440,7 +440,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i64(
     <vscale x 4 x i16> %0,
@@ -463,7 +463,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(
     <vscale x 8 x i16> %0,
@@ -485,7 +485,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i64(
     <vscale x 8 x i16> %0,
@@ -508,7 +508,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(
     <vscale x 16 x i16> %0,
@@ -530,7 +530,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(
     <vscale x 16 x i16> %0,
@@ -553,7 +553,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(
     <vscale x 32 x i16> %0,
@@ -576,7 +576,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i1
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i64(
     <vscale x 32 x i16> %0,
@@ -599,7 +599,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(
     <vscale x 1 x i32> %0,
@@ -621,7 +621,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(
     <vscale x 1 x i32> %0,
@@ -644,7 +644,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(
     <vscale x 2 x i32> %0,
@@ -666,7 +666,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i64(
     <vscale x 2 x i32> %0,
@@ -689,7 +689,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(
     <vscale x 4 x i32> %0,
@@ -711,7 +711,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i64(
     <vscale x 4 x i32> %0,
@@ -734,7 +734,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(
     <vscale x 8 x i32> %0,
@@ -756,7 +756,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i64(
     <vscale x 8 x i32> %0,
@@ -779,7 +779,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(
     <vscale x 16 x i32> %0,
@@ -802,7 +802,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i3
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i64(
     <vscale x 16 x i32> %0,
@@ -825,7 +825,7 @@ define <vscale x 1 x i64> @intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -847,7 +847,7 @@ define <vscale x 1 x i64> @intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -870,7 +870,7 @@ define <vscale x 2 x i64> @intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -892,7 +892,7 @@ define <vscale x 2 x i64> @intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -915,7 +915,7 @@ define <vscale x 4 x i64> @intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -937,7 +937,7 @@ define <vscale x 4 x i64> @intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -960,7 +960,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64(<v
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1006,7 +1006,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i64(
     <vscale x 1 x half> %0,
@@ -1028,7 +1028,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.i64(
     <vscale x 1 x half> %0,
@@ -1051,7 +1051,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i64(
     <vscale x 2 x half> %0,
@@ -1073,7 +1073,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.i64(
     <vscale x 2 x half> %0,
@@ -1096,7 +1096,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i64(
     <vscale x 4 x half> %0,
@@ -1118,7 +1118,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.i64(
     <vscale x 4 x half> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i64(
     <vscale x 8 x half> %0,
@@ -1163,7 +1163,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.i64(
     <vscale x 8 x half> %0,
@@ -1186,7 +1186,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i64(
     <vscale x 16 x half> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.i64(
     <vscale x 16 x half> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16(<v
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i64(
     <vscale x 32 x half> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.i64(
     <vscale x 32 x half> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(
     <vscale x 1 x float> %0,
@@ -1299,7 +1299,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i64(
     <vscale x 1 x float> %0,
@@ -1322,7 +1322,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(
     <vscale x 2 x float> %0,
@@ -1344,7 +1344,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i64(
     <vscale x 2 x float> %0,
@@ -1367,7 +1367,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(
     <vscale x 4 x float> %0,
@@ -1389,7 +1389,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i64(
     <vscale x 4 x float> %0,
@@ -1412,7 +1412,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(
     <vscale x 8 x float> %0,
@@ -1434,7 +1434,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i64(
     <vscale x 8 x float> %0,
@@ -1457,7 +1457,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32(<
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(
     <vscale x 16 x float> %0,
@@ -1480,7 +1480,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i64(
     <vscale x 16 x float> %0,
@@ -1503,7 +1503,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(
     <vscale x 1 x double> %0,
@@ -1525,7 +1525,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i64(
     <vscale x 1 x double> %0,
@@ -1548,7 +1548,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(
     <vscale x 2 x double> %0,
@@ -1570,7 +1570,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i64(
     <vscale x 2 x double> %0,
@@ -1593,7 +1593,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(
     <vscale x 4 x double> %0,
@@ -1615,7 +1615,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i64(
     <vscale x 4 x double> %0,
@@ -1638,7 +1638,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(
     <vscale x 8 x double> %0,
@@ -1661,7 +1661,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i64(
     <vscale x 8 x double> %0,
@@ -1684,7 +1684,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(
     <vscale x 1 x i8> %0,
@@ -1706,7 +1706,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i64(
     <vscale x 1 x i8> %0,
@@ -1729,7 +1729,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(
     <vscale x 2 x i8> %0,
@@ -1751,7 +1751,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i64(
     <vscale x 2 x i8> %0,
@@ -1774,7 +1774,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(
     <vscale x 4 x i8> %0,
@@ -1796,7 +1796,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i64(
     <vscale x 4 x i8> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(
     <vscale x 8 x i8> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i64(
     <vscale x 8 x i8> %0,
@@ -1864,7 +1864,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(
     <vscale x 16 x i8> %0,
@@ -1886,7 +1886,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i64(
     <vscale x 16 x i8> %0,
@@ -1909,7 +1909,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i64(<vscale x 3
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(
     <vscale x 32 x i8> %0,
@@ -1931,7 +1931,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i64(
     <vscale x 32 x i8> %0,
@@ -1954,7 +1954,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i64(<vscale x 6
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(
     <vscale x 64 x i8> %0,
@@ -1976,7 +1976,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i64(
     <vscale x 64 x i8> %0,
@@ -1999,7 +1999,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(
     <vscale x 1 x i16> %0,
@@ -2021,7 +2021,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i64(
     <vscale x 1 x i16> %0,
@@ -2044,7 +2044,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(
     <vscale x 2 x i16> %0,
@@ -2066,7 +2066,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i64(
     <vscale x 2 x i16> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(
     <vscale x 4 x i16> %0,
@@ -2111,7 +2111,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i64(
     <vscale x 4 x i16> %0,
@@ -2134,7 +2134,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(
     <vscale x 8 x i16> %0,
@@ -2156,7 +2156,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i64(
     <vscale x 8 x i16> %0,
@@ -2179,7 +2179,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(
     <vscale x 16 x i16> %0,
@@ -2201,7 +2201,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i64(
     <vscale x 16 x i16> %0,
@@ -2224,7 +2224,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(
     <vscale x 32 x i16> %0,
@@ -2246,7 +2246,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i64(
     <vscale x 32 x i16> %0,
@@ -2269,7 +2269,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(
     <vscale x 1 x i32> %0,
@@ -2291,7 +2291,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(
     <vscale x 1 x i32> %0,
@@ -2314,7 +2314,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
     <vscale x 2 x i32> %0,
@@ -2336,7 +2336,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i64(
     <vscale x 2 x i32> %0,
@@ -2359,7 +2359,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(
     <vscale x 4 x i32> %0,
@@ -2381,7 +2381,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i64(
     <vscale x 4 x i32> %0,
@@ -2404,7 +2404,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(
     <vscale x 8 x i32> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i64(
     <vscale x 8 x i32> %0,
@@ -2449,7 +2449,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(
     <vscale x 16 x i32> %0,
@@ -2471,7 +2471,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i64(
     <vscale x 16 x i32> %0,
@@ -2494,7 +2494,7 @@ define <vscale x 1 x i64> @intrinsic_vrgather_vx_nxv1i64_nxv1i64_i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2516,7 +2516,7 @@ define <vscale x 1 x i64> @intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2539,7 +2539,7 @@ define <vscale x 2 x i64> @intrinsic_vrgather_vx_nxv2i64_nxv2i64_i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2561,7 +2561,7 @@ define <vscale x 2 x i64> @intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2584,7 +2584,7 @@ define <vscale x 4 x i64> @intrinsic_vrgather_vx_nxv4i64_nxv4i64_i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2606,7 +2606,7 @@ define <vscale x 4 x i64> @intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2629,7 +2629,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_vx_nxv8i64_nxv8i64_i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2651,7 +2651,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2674,7 +2674,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(
     <vscale x 1 x half> %0,
@@ -2696,7 +2696,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i64(
     <vscale x 1 x half> %0,
@@ -2719,7 +2719,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(
     <vscale x 2 x half> %0,
@@ -2741,7 +2741,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i64(
     <vscale x 2 x half> %0,
@@ -2764,7 +2764,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(
     <vscale x 4 x half> %0,
@@ -2786,7 +2786,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i64(
     <vscale x 4 x half> %0,
@@ -2809,7 +2809,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(
     <vscale x 8 x half> %0,
@@ -2831,7 +2831,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i64(
     <vscale x 8 x half> %0,
@@ -2854,7 +2854,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(
     <vscale x 16 x half> %0,
@@ -2876,7 +2876,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i64(
     <vscale x 16 x half> %0,
@@ -2899,7 +2899,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(
     <vscale x 32 x half> %0,
@@ -2921,7 +2921,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i64(
     <vscale x 32 x half> %0,
@@ -2944,7 +2944,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(
     <vscale x 1 x float> %0,
@@ -2966,7 +2966,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(
     <vscale x 1 x float> %0,
@@ -2989,7 +2989,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(
     <vscale x 2 x float> %0,
@@ -3011,7 +3011,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i64(
     <vscale x 2 x float> %0,
@@ -3034,7 +3034,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(
     <vscale x 4 x float> %0,
@@ -3056,7 +3056,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i64(
     <vscale x 4 x float> %0,
@@ -3079,7 +3079,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(
     <vscale x 8 x float> %0,
@@ -3101,7 +3101,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i64(
     <vscale x 8 x float> %0,
@@ -3124,7 +3124,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i64(<vscal
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(
     <vscale x 16 x float> %0,
@@ -3146,7 +3146,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i64(
     <vscale x 16 x float> %0,
@@ -3169,7 +3169,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vx_nxv1f64_nxv1f64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(
     <vscale x 1 x double> %0,
@@ -3191,7 +3191,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i64(
     <vscale x 1 x double> %0,
@@ -3214,7 +3214,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vx_nxv2f64_nxv2f64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(
     <vscale x 2 x double> %0,
@@ -3236,7 +3236,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i64(
     <vscale x 2 x double> %0,
@@ -3259,7 +3259,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vx_nxv4f64_nxv4f64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(
     <vscale x 4 x double> %0,
@@ -3281,7 +3281,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i64(
     <vscale x 4 x double> %0,
@@ -3304,7 +3304,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vx_nxv8f64_nxv8f64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(
     <vscale x 8 x double> %0,
@@ -3326,7 +3326,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i64(
     <vscale x 8 x double> %0,
@@ -3344,7 +3344,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i64(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(
     <vscale x 1 x i8> %0,
@@ -3359,7 +3359,7 @@ define <vscale x 1 x i8> @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i64(
     <vscale x 1 x i8> %0,
@@ -3377,7 +3377,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i64(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(
     <vscale x 2 x i8> %0,
@@ -3392,7 +3392,7 @@ define <vscale x 2 x i8> @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i64(
     <vscale x 2 x i8> %0,
@@ -3410,7 +3410,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i64(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(
     <vscale x 4 x i8> %0,
@@ -3425,7 +3425,7 @@ define <vscale x 4 x i8> @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i64(
     <vscale x 4 x i8> %0,
@@ -3443,7 +3443,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i64(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(
     <vscale x 8 x i8> %0,
@@ -3458,7 +3458,7 @@ define <vscale x 8 x i8> @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i64(
     <vscale x 8 x i8> %0,
@@ -3476,7 +3476,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(
     <vscale x 16 x i8> %0,
@@ -3491,7 +3491,7 @@ define <vscale x 16 x i8> @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i64(
     <vscale x 16 x i8> %0,
@@ -3509,7 +3509,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i64(<vscale x 3
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(
     <vscale x 32 x i8> %0,
@@ -3524,7 +3524,7 @@ define <vscale x 32 x i8> @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i64(
     <vscale x 32 x i8> %0,
@@ -3542,7 +3542,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i64(<vscale x 6
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(
     <vscale x 64 x i8> %0,
@@ -3557,7 +3557,7 @@ define <vscale x 64 x i8> @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i64(
     <vscale x 64 x i8> %0,
@@ -3575,7 +3575,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(
     <vscale x 1 x i16> %0,
@@ -3590,7 +3590,7 @@ define <vscale x 1 x i16> @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i64(
     <vscale x 1 x i16> %0,
@@ -3608,7 +3608,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(
     <vscale x 2 x i16> %0,
@@ -3623,7 +3623,7 @@ define <vscale x 2 x i16> @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i64(
     <vscale x 2 x i16> %0,
@@ -3641,7 +3641,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(
     <vscale x 4 x i16> %0,
@@ -3656,7 +3656,7 @@ define <vscale x 4 x i16> @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i64(
     <vscale x 4 x i16> %0,
@@ -3674,7 +3674,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(
     <vscale x 8 x i16> %0,
@@ -3689,7 +3689,7 @@ define <vscale x 8 x i16> @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i64(
     <vscale x 8 x i16> %0,
@@ -3707,7 +3707,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(
     <vscale x 16 x i16> %0,
@@ -3722,7 +3722,7 @@ define <vscale x 16 x i16> @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i64(
     <vscale x 16 x i16> %0,
@@ -3740,7 +3740,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(
     <vscale x 32 x i16> %0,
@@ -3755,7 +3755,7 @@ define <vscale x 32 x i16> @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i64(
     <vscale x 32 x i16> %0,
@@ -3773,7 +3773,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(
     <vscale x 1 x i32> %0,
@@ -3788,7 +3788,7 @@ define <vscale x 1 x i32> @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(
     <vscale x 1 x i32> %0,
@@ -3806,7 +3806,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
     <vscale x 2 x i32> %0,
@@ -3821,7 +3821,7 @@ define <vscale x 2 x i32> @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i64(
     <vscale x 2 x i32> %0,
@@ -3839,7 +3839,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(
     <vscale x 4 x i32> %0,
@@ -3854,7 +3854,7 @@ define <vscale x 4 x i32> @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i64(
     <vscale x 4 x i32> %0,
@@ -3872,7 +3872,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(
     <vscale x 8 x i32> %0,
@@ -3887,7 +3887,7 @@ define <vscale x 8 x i32> @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i64(
     <vscale x 8 x i32> %0,
@@ -3905,7 +3905,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(
     <vscale x 16 x i32> %0,
@@ -3920,7 +3920,7 @@ define <vscale x 16 x i32> @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i64(
     <vscale x 16 x i32> %0,
@@ -3938,7 +3938,7 @@ define <vscale x 1 x i64> @intrinsic_vrgather_vi_nxv1i64_nxv1i64_i64(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -3953,7 +3953,7 @@ define <vscale x 1 x i64> @intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -3971,7 +3971,7 @@ define <vscale x 2 x i64> @intrinsic_vrgather_vi_nxv2i64_nxv2i64_i64(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -3986,7 +3986,7 @@ define <vscale x 2 x i64> @intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -4004,7 +4004,7 @@ define <vscale x 4 x i64> @intrinsic_vrgather_vi_nxv4i64_nxv4i64_i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -4019,7 +4019,7 @@ define <vscale x 4 x i64> @intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -4037,7 +4037,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_vi_nxv8i64_nxv8i64_i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -4052,7 +4052,7 @@ define <vscale x 8 x i64> @intrinsic_vrgather_mask_vi_nxv8i64_nxv8i64_i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -4070,7 +4070,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(
     <vscale x 1 x half> %0,
@@ -4085,7 +4085,7 @@ define <vscale x 1 x half> @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i64(
     <vscale x 1 x half> %0,
@@ -4103,7 +4103,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(
     <vscale x 2 x half> %0,
@@ -4118,7 +4118,7 @@ define <vscale x 2 x half> @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i64(
     <vscale x 2 x half> %0,
@@ -4136,7 +4136,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(
     <vscale x 4 x half> %0,
@@ -4151,7 +4151,7 @@ define <vscale x 4 x half> @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i64(
     <vscale x 4 x half> %0,
@@ -4169,7 +4169,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(
     <vscale x 8 x half> %0,
@@ -4184,7 +4184,7 @@ define <vscale x 8 x half> @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i64(
     <vscale x 8 x half> %0,
@@ -4202,7 +4202,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(
     <vscale x 16 x half> %0,
@@ -4217,7 +4217,7 @@ define <vscale x 16 x half> @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i64(
     <vscale x 16 x half> %0,
@@ -4235,7 +4235,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(
     <vscale x 32 x half> %0,
@@ -4250,7 +4250,7 @@ define <vscale x 32 x half> @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i64(
     <vscale x 32 x half> %0,
@@ -4268,7 +4268,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(
     <vscale x 1 x float> %0,
@@ -4283,7 +4283,7 @@ define <vscale x 1 x float> @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(
     <vscale x 1 x float> %0,
@@ -4301,7 +4301,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(
     <vscale x 2 x float> %0,
@@ -4316,7 +4316,7 @@ define <vscale x 2 x float> @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i64(
     <vscale x 2 x float> %0,
@@ -4334,7 +4334,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(
     <vscale x 4 x float> %0,
@@ -4349,7 +4349,7 @@ define <vscale x 4 x float> @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i64(
     <vscale x 4 x float> %0,
@@ -4367,7 +4367,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(
     <vscale x 8 x float> %0,
@@ -4382,7 +4382,7 @@ define <vscale x 8 x float> @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i64(
     <vscale x 8 x float> %0,
@@ -4400,7 +4400,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i64(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(
     <vscale x 16 x float> %0,
@@ -4415,7 +4415,7 @@ define <vscale x 16 x float> @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i64(
     <vscale x 16 x float> %0,
@@ -4433,7 +4433,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_vi_nxv1f64_nxv1f64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(
     <vscale x 1 x double> %0,
@@ -4448,7 +4448,7 @@ define <vscale x 1 x double> @intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i64(
     <vscale x 1 x double> %0,
@@ -4466,7 +4466,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_vi_nxv2f64_nxv2f64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(
     <vscale x 2 x double> %0,
@@ -4481,7 +4481,7 @@ define <vscale x 2 x double> @intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i64(
     <vscale x 2 x double> %0,
@@ -4499,7 +4499,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_vi_nxv4f64_nxv4f64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(
     <vscale x 4 x double> %0,
@@ -4514,7 +4514,7 @@ define <vscale x 4 x double> @intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i64(
     <vscale x 4 x double> %0,
@@ -4532,7 +4532,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_vi_nxv8f64_nxv8f64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(
     <vscale x 8 x double> %0,
@@ -4547,7 +4547,7 @@ define <vscale x 8 x double> @intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
index 7a038d096cbd..bdd4b36d5e8c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vrgatherei16_mask_vv_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vrgatherei16_mask_vv_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vrgatherei16_mask_vv_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i8> @intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i8> @intrinsic_vrgatherei16_mask_vv_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i8> @intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i8> @intrinsic_vrgatherei16_mask_vv_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i16> @intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i16> @intrinsic_vrgatherei16_mask_vv_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i16> @intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i16> @intrinsic_vrgatherei16_mask_vv_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i16> @intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i16> @intrinsic_vrgatherei16_mask_vv_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i16> @intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i16> @intrinsic_vrgatherei16_mask_vv_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i16> @intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i16> @intrinsic_vrgatherei16_mask_vv_nxv16i16_nxv16i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@ define <vscale x 32 x i16> @intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -530,7 +530,7 @@ define <vscale x 32 x i16> @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16(<vs
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -553,7 +553,7 @@ define <vscale x 1 x i32> @intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -575,7 +575,7 @@ define <vscale x 1 x i32> @intrinsic_vrgatherei16_mask_vv_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -598,7 +598,7 @@ define <vscale x 4 x i32> @intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -620,7 +620,7 @@ define <vscale x 4 x i32> @intrinsic_vrgatherei16_mask_vv_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -643,7 +643,7 @@ define <vscale x 8 x i32> @intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -665,7 +665,7 @@ define <vscale x 8 x i32> @intrinsic_vrgatherei16_mask_vv_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -688,7 +688,7 @@ define <vscale x 16 x i32> @intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -711,7 +711,7 @@ define <vscale x 16 x i32> @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32(<vs
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -734,7 +734,7 @@ define <vscale x 4 x i64> @intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -756,7 +756,7 @@ define <vscale x 4 x i64> @intrinsic_vrgatherei16_mask_vv_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -779,7 +779,7 @@ define <vscale x 8 x i64> @intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -802,7 +802,7 @@ define <vscale x 8 x i64> @intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl2re16.v v26, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v26, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -825,7 +825,7 @@ define <vscale x 1 x half> @intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.nxv1f16(
     <vscale x 1 x half> %0,
@@ -847,7 +847,7 @@ define <vscale x 1 x half> @intrinsic_vrgatherei16_mask_vv_nxv1f16_nxv1f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -870,7 +870,7 @@ define <vscale x 2 x half> @intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.nxv2f16(
     <vscale x 2 x half> %0,
@@ -892,7 +892,7 @@ define <vscale x 2 x half> @intrinsic_vrgatherei16_mask_vv_nxv2f16_nxv2f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -915,7 +915,7 @@ define <vscale x 4 x half> @intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.nxv4f16(
     <vscale x 4 x half> %0,
@@ -937,7 +937,7 @@ define <vscale x 4 x half> @intrinsic_vrgatherei16_mask_vv_nxv4f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -960,7 +960,7 @@ define <vscale x 8 x half> @intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.nxv8f16(
     <vscale x 8 x half> %0,
@@ -982,7 +982,7 @@ define <vscale x 8 x half> @intrinsic_vrgatherei16_mask_vv_nxv8f16_nxv8f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 16 x half> @intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 16 x half> @intrinsic_vrgatherei16_mask_vv_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1050,7 +1050,7 @@ define <vscale x 32 x half> @intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1073,7 +1073,7 @@ define <vscale x 32 x half> @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1096,7 +1096,7 @@ define <vscale x 1 x float> @intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1118,7 +1118,7 @@ define <vscale x 1 x float> @intrinsic_vrgatherei16_mask_vv_nxv1f32_nxv1f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 4 x float> @intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1163,7 +1163,7 @@ define <vscale x 4 x float> @intrinsic_vrgatherei16_mask_vv_nxv4f32_nxv4f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1186,7 +1186,7 @@ define <vscale x 8 x float> @intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 8 x float> @intrinsic_vrgatherei16_mask_vv_nxv8f32_nxv8f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 16 x float> @intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32(
     <vscale x 16 x float> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 16 x float> @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32(<
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 4 x double> @intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64(
     <vscale x 4 x double> %0,
@@ -1299,7 +1299,7 @@ define <vscale x 4 x double> @intrinsic_vrgatherei16_mask_vv_nxv4f64_nxv4f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -1322,7 +1322,7 @@ define <vscale x 8 x double> @intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64(
     <vscale x 8 x double> %0,
@@ -1345,7 +1345,7 @@ define <vscale x 8 x double> @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64(<vs
 ; CHECK-NEXT:    vl2re16.v v26, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v26, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv8f64(
     <vscale x 8 x double> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
index f6d8f1ba9e3b..edc998676ed7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vrgatherei16_mask_vv_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vrgatherei16_mask_vv_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vrgatherei16_mask_vv_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i8> @intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i8> @intrinsic_vrgatherei16_mask_vv_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i8> @intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i8> @intrinsic_vrgatherei16_mask_vv_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i16> @intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i16> @intrinsic_vrgatherei16_mask_vv_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i16> @intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i16> @intrinsic_vrgatherei16_mask_vv_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i16> @intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i16> @intrinsic_vrgatherei16_mask_vv_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i16> @intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i16> @intrinsic_vrgatherei16_mask_vv_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i16> @intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i16> @intrinsic_vrgatherei16_mask_vv_nxv16i16_nxv16i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@ define <vscale x 32 x i16> @intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -530,7 +530,7 @@ define <vscale x 32 x i16> @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16(<vs
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -553,7 +553,7 @@ define <vscale x 1 x i32> @intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -575,7 +575,7 @@ define <vscale x 1 x i32> @intrinsic_vrgatherei16_mask_vv_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -598,7 +598,7 @@ define <vscale x 4 x i32> @intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -620,7 +620,7 @@ define <vscale x 4 x i32> @intrinsic_vrgatherei16_mask_vv_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -643,7 +643,7 @@ define <vscale x 8 x i32> @intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -665,7 +665,7 @@ define <vscale x 8 x i32> @intrinsic_vrgatherei16_mask_vv_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -688,7 +688,7 @@ define <vscale x 16 x i32> @intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -711,7 +711,7 @@ define <vscale x 16 x i32> @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32(<vs
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -734,7 +734,7 @@ define <vscale x 4 x i64> @intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -756,7 +756,7 @@ define <vscale x 4 x i64> @intrinsic_vrgatherei16_mask_vv_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -779,7 +779,7 @@ define <vscale x 8 x i64> @intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -802,7 +802,7 @@ define <vscale x 8 x i64> @intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl2re16.v v26, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v26, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -825,7 +825,7 @@ define <vscale x 1 x half> @intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.nxv1f16(
     <vscale x 1 x half> %0,
@@ -847,7 +847,7 @@ define <vscale x 1 x half> @intrinsic_vrgatherei16_mask_vv_nxv1f16_nxv1f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -870,7 +870,7 @@ define <vscale x 2 x half> @intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.nxv2f16(
     <vscale x 2 x half> %0,
@@ -892,7 +892,7 @@ define <vscale x 2 x half> @intrinsic_vrgatherei16_mask_vv_nxv2f16_nxv2f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -915,7 +915,7 @@ define <vscale x 4 x half> @intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.nxv4f16(
     <vscale x 4 x half> %0,
@@ -937,7 +937,7 @@ define <vscale x 4 x half> @intrinsic_vrgatherei16_mask_vv_nxv4f16_nxv4f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -960,7 +960,7 @@ define <vscale x 8 x half> @intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.nxv8f16(
     <vscale x 8 x half> %0,
@@ -982,7 +982,7 @@ define <vscale x 8 x half> @intrinsic_vrgatherei16_mask_vv_nxv8f16_nxv8f16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 16 x half> @intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 16 x half> @intrinsic_vrgatherei16_mask_vv_nxv16f16_nxv16f16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1050,7 +1050,7 @@ define <vscale x 32 x half> @intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1073,7 +1073,7 @@ define <vscale x 32 x half> @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1096,7 +1096,7 @@ define <vscale x 1 x float> @intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1118,7 +1118,7 @@ define <vscale x 1 x float> @intrinsic_vrgatherei16_mask_vv_nxv1f32_nxv1f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 4 x float> @intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1163,7 +1163,7 @@ define <vscale x 4 x float> @intrinsic_vrgatherei16_mask_vv_nxv4f32_nxv4f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1186,7 +1186,7 @@ define <vscale x 8 x float> @intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 8 x float> @intrinsic_vrgatherei16_mask_vv_nxv8f32_nxv8f32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 16 x float> @intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32(
     <vscale x 16 x float> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 16 x float> @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32(<
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 4 x double> @intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64(
     <vscale x 4 x double> %0,
@@ -1299,7 +1299,7 @@ define <vscale x 4 x double> @intrinsic_vrgatherei16_mask_vv_nxv4f64_nxv4f64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -1322,7 +1322,7 @@ define <vscale x 8 x double> @intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64(
     <vscale x 8 x double> %0,
@@ -1345,7 +1345,7 @@ define <vscale x 8 x double> @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64(<vs
 ; CHECK-NEXT:    vl2re16.v v26, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v26, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv8f64(
     <vscale x 8 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
index 39eda6824a8f..7123cac920f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -319,7 +319,7 @@ define <vscale x 1 x i16> @intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -341,7 +341,7 @@ define <vscale x 1 x i16> @intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -363,7 +363,7 @@ define <vscale x 2 x i16> @intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -385,7 +385,7 @@ define <vscale x 2 x i16> @intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -407,7 +407,7 @@ define <vscale x 4 x i16> @intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -429,7 +429,7 @@ define <vscale x 4 x i16> @intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -451,7 +451,7 @@ define <vscale x 8 x i16> @intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -473,7 +473,7 @@ define <vscale x 8 x i16> @intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i16> @intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -517,7 +517,7 @@ define <vscale x 16 x i16> @intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -539,7 +539,7 @@ define <vscale x 32 x i16> @intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -561,7 +561,7 @@ define <vscale x 32 x i16> @intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -583,7 +583,7 @@ define <vscale x 1 x i32> @intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -605,7 +605,7 @@ define <vscale x 1 x i32> @intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -627,7 +627,7 @@ define <vscale x 2 x i32> @intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -649,7 +649,7 @@ define <vscale x 2 x i32> @intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -671,7 +671,7 @@ define <vscale x 4 x i32> @intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -693,7 +693,7 @@ define <vscale x 4 x i32> @intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -715,7 +715,7 @@ define <vscale x 8 x i32> @intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -737,7 +737,7 @@ define <vscale x 8 x i32> @intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -759,7 +759,7 @@ define <vscale x 16 x i32> @intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x i32> @intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -809,7 +809,7 @@ define <vscale x 1 x i64> @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vsub.vv v8, v25, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -838,7 +838,7 @@ define <vscale x 1 x i64> @intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v25, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -866,7 +866,7 @@ define <vscale x 2 x i64> @intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vsub.vv v8, v26, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -895,7 +895,7 @@ define <vscale x 2 x i64> @intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v26, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -923,7 +923,7 @@ define <vscale x 4 x i64> @intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vsub.vv v8, v28, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -952,7 +952,7 @@ define <vscale x 4 x i64> @intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v28, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -980,7 +980,7 @@ define <vscale x 8 x i64> @intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vsub.vv v8, v16, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1009,7 +1009,7 @@ define <vscale x 8 x i64> @intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v24, v16, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1026,7 +1026,7 @@ define <vscale x 1 x i8> @intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 1 x i8> @intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1058,7 +1058,7 @@ define <vscale x 2 x i8> @intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1073,7 +1073,7 @@ define <vscale x 2 x i8> @intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1090,7 +1090,7 @@ define <vscale x 4 x i8> @intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1105,7 +1105,7 @@ define <vscale x 4 x i8> @intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1122,7 +1122,7 @@ define <vscale x 8 x i8> @intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1154,7 +1154,7 @@ define <vscale x 16 x i8> @intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1169,7 +1169,7 @@ define <vscale x 16 x i8> @intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1186,7 +1186,7 @@ define <vscale x 32 x i8> @intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1201,7 +1201,7 @@ define <vscale x 32 x i8> @intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1218,7 +1218,7 @@ define <vscale x 64 x i8> @intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1233,7 +1233,7 @@ define <vscale x 64 x i8> @intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i16> @intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1265,7 +1265,7 @@ define <vscale x 1 x i16> @intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1282,7 +1282,7 @@ define <vscale x 2 x i16> @intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i16> @intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1314,7 +1314,7 @@ define <vscale x 4 x i16> @intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1329,7 +1329,7 @@ define <vscale x 4 x i16> @intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1346,7 +1346,7 @@ define <vscale x 8 x i16> @intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1361,7 +1361,7 @@ define <vscale x 8 x i16> @intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1378,7 +1378,7 @@ define <vscale x 16 x i16> @intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1393,7 +1393,7 @@ define <vscale x 16 x i16> @intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1410,7 +1410,7 @@ define <vscale x 32 x i16> @intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1425,7 +1425,7 @@ define <vscale x 32 x i16> @intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1442,7 +1442,7 @@ define <vscale x 1 x i32> @intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1457,7 +1457,7 @@ define <vscale x 1 x i32> @intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1474,7 +1474,7 @@ define <vscale x 2 x i32> @intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 2 x i32> @intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1506,7 +1506,7 @@ define <vscale x 4 x i32> @intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1521,7 +1521,7 @@ define <vscale x 4 x i32> @intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1538,7 +1538,7 @@ define <vscale x 8 x i32> @intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1553,7 +1553,7 @@ define <vscale x 8 x i32> @intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1570,7 +1570,7 @@ define <vscale x 16 x i32> @intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1585,7 +1585,7 @@ define <vscale x 16 x i32> @intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1602,7 +1602,7 @@ define <vscale x 1 x i64> @intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1617,7 +1617,7 @@ define <vscale x 1 x i64> @intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1634,7 +1634,7 @@ define <vscale x 2 x i64> @intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1649,7 +1649,7 @@ define <vscale x 2 x i64> @intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1666,7 +1666,7 @@ define <vscale x 4 x i64> @intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1681,7 +1681,7 @@ define <vscale x 4 x i64> @intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1698,7 +1698,7 @@ define <vscale x 8 x i64> @intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1713,7 +1713,7 @@ define <vscale x 8 x i64> @intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
index 6b364a1a3871..a6e986a3ead9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -319,7 +319,7 @@ define <vscale x 1 x i16> @intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -341,7 +341,7 @@ define <vscale x 1 x i16> @intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -363,7 +363,7 @@ define <vscale x 2 x i16> @intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -385,7 +385,7 @@ define <vscale x 2 x i16> @intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -407,7 +407,7 @@ define <vscale x 4 x i16> @intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -429,7 +429,7 @@ define <vscale x 4 x i16> @intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -451,7 +451,7 @@ define <vscale x 8 x i16> @intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -473,7 +473,7 @@ define <vscale x 8 x i16> @intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i16> @intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -517,7 +517,7 @@ define <vscale x 16 x i16> @intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -539,7 +539,7 @@ define <vscale x 32 x i16> @intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -561,7 +561,7 @@ define <vscale x 32 x i16> @intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -583,7 +583,7 @@ define <vscale x 1 x i32> @intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -605,7 +605,7 @@ define <vscale x 1 x i32> @intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -627,7 +627,7 @@ define <vscale x 2 x i32> @intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -649,7 +649,7 @@ define <vscale x 2 x i32> @intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -671,7 +671,7 @@ define <vscale x 4 x i32> @intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -693,7 +693,7 @@ define <vscale x 4 x i32> @intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -715,7 +715,7 @@ define <vscale x 8 x i32> @intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -737,7 +737,7 @@ define <vscale x 8 x i32> @intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -759,7 +759,7 @@ define <vscale x 16 x i32> @intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x i32> @intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -803,7 +803,7 @@ define <vscale x 1 x i64> @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -825,7 +825,7 @@ define <vscale x 1 x i64> @intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -847,7 +847,7 @@ define <vscale x 2 x i64> @intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i64> @intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -891,7 +891,7 @@ define <vscale x 4 x i64> @intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -913,7 +913,7 @@ define <vscale x 4 x i64> @intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -935,7 +935,7 @@ define <vscale x 8 x i64> @intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -957,7 +957,7 @@ define <vscale x 8 x i64> @intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -974,7 +974,7 @@ define <vscale x 1 x i8> @intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -989,7 +989,7 @@ define <vscale x 1 x i8> @intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1006,7 +1006,7 @@ define <vscale x 2 x i8> @intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1021,7 +1021,7 @@ define <vscale x 2 x i8> @intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1038,7 +1038,7 @@ define <vscale x 4 x i8> @intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1053,7 +1053,7 @@ define <vscale x 4 x i8> @intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 8 x i8> @intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 8 x i8> @intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1102,7 +1102,7 @@ define <vscale x 16 x i8> @intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1117,7 +1117,7 @@ define <vscale x 16 x i8> @intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1134,7 +1134,7 @@ define <vscale x 32 x i8> @intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1149,7 +1149,7 @@ define <vscale x 32 x i8> @intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1166,7 +1166,7 @@ define <vscale x 64 x i8> @intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 64 x i8> @intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1198,7 +1198,7 @@ define <vscale x 1 x i16> @intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1213,7 +1213,7 @@ define <vscale x 1 x i16> @intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1230,7 +1230,7 @@ define <vscale x 2 x i16> @intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1245,7 +1245,7 @@ define <vscale x 2 x i16> @intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1262,7 +1262,7 @@ define <vscale x 4 x i16> @intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 4 x i16> @intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 8 x i16> @intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1309,7 +1309,7 @@ define <vscale x 8 x i16> @intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1326,7 +1326,7 @@ define <vscale x 16 x i16> @intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1341,7 +1341,7 @@ define <vscale x 16 x i16> @intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1358,7 +1358,7 @@ define <vscale x 32 x i16> @intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1373,7 +1373,7 @@ define <vscale x 32 x i16> @intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1390,7 +1390,7 @@ define <vscale x 1 x i32> @intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1405,7 +1405,7 @@ define <vscale x 1 x i32> @intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1422,7 +1422,7 @@ define <vscale x 2 x i32> @intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1437,7 +1437,7 @@ define <vscale x 2 x i32> @intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1454,7 +1454,7 @@ define <vscale x 4 x i32> @intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1469,7 +1469,7 @@ define <vscale x 4 x i32> @intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1486,7 +1486,7 @@ define <vscale x 8 x i32> @intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1501,7 +1501,7 @@ define <vscale x 8 x i32> @intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1518,7 +1518,7 @@ define <vscale x 16 x i32> @intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 16 x i32> @intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1550,7 +1550,7 @@ define <vscale x 1 x i64> @intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1565,7 +1565,7 @@ define <vscale x 1 x i64> @intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1582,7 +1582,7 @@ define <vscale x 2 x i64> @intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1597,7 +1597,7 @@ define <vscale x 2 x i64> @intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1614,7 +1614,7 @@ define <vscale x 4 x i64> @intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1629,7 +1629,7 @@ define <vscale x 4 x i64> @intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1646,7 +1646,7 @@ define <vscale x 8 x i64> @intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1661,7 +1661,7 @@ define <vscale x 8 x i64> @intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
index 53a1866f0470..cf6451b960d7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vsadd.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vsadd.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vsadd.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1998,7 +1998,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2013,7 +2013,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2030,7 +2030,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2045,7 +2045,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2062,7 +2062,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2077,7 +2077,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2094,7 +2094,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2109,7 +2109,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2126,7 +2126,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2141,7 +2141,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2158,7 +2158,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2173,7 +2173,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2190,7 +2190,7 @@ define <vscale x 64 x i8> @intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2205,7 +2205,7 @@ define <vscale x 64 x i8> @intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2222,7 +2222,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2237,7 +2237,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2254,7 +2254,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2269,7 +2269,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2286,7 +2286,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2301,7 +2301,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2318,7 +2318,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2333,7 +2333,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2350,7 +2350,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2365,7 +2365,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2382,7 +2382,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2397,7 +2397,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2414,7 +2414,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2429,7 +2429,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2446,7 +2446,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2461,7 +2461,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2478,7 +2478,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2493,7 +2493,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2510,7 +2510,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2525,7 +2525,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2542,7 +2542,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2557,7 +2557,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2574,7 +2574,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2589,7 +2589,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2606,7 +2606,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2621,7 +2621,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2638,7 +2638,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2653,7 +2653,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2670,7 +2670,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2685,7 +2685,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
index 89a5b9d4cf6a..fa88d295aef2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 1 x i8> @intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 2 x i8> @intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 4 x i8> @intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 8 x i8> @intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 16 x i8> @intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 32 x i8> @intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 64 x i8> @intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 64 x i8> @intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i16> @intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i16> @intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i16> @intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i16> @intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i16> @intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@ define <vscale x 32 x i16> @intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@ define <vscale x 1 x i32> @intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@ define <vscale x 2 x i32> @intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@ define <vscale x 4 x i32> @intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@ define <vscale x 8 x i32> @intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@ define <vscale x 16 x i32> @intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@ define <vscale x 1 x i64> @intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@ define <vscale x 2 x i64> @intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@ define <vscale x 4 x i64> @intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 8 x i64> @intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
index 77136dc4721a..9fb3abb944d7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsc
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1998,7 +1998,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2013,7 +2013,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2030,7 +2030,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2045,7 +2045,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2062,7 +2062,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2077,7 +2077,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2094,7 +2094,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2109,7 +2109,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2126,7 +2126,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2141,7 +2141,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2158,7 +2158,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2173,7 +2173,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2190,7 +2190,7 @@ define <vscale x 64 x i8> @intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2205,7 +2205,7 @@ define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2222,7 +2222,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2237,7 +2237,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2254,7 +2254,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2269,7 +2269,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2286,7 +2286,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2301,7 +2301,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2318,7 +2318,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2333,7 +2333,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2350,7 +2350,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2365,7 +2365,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2382,7 +2382,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2397,7 +2397,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2414,7 +2414,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2429,7 +2429,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2446,7 +2446,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2461,7 +2461,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2478,7 +2478,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2493,7 +2493,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2510,7 +2510,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2525,7 +2525,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2542,7 +2542,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2557,7 +2557,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2574,7 +2574,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2589,7 +2589,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2606,7 +2606,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2621,7 +2621,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2638,7 +2638,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2653,7 +2653,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2670,7 +2670,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2685,7 +2685,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
index 05da897920bf..89f54c3a1e2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsc
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 64 x i8> @intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@ define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@ define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@ define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@ define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@ define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@ define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@ define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@ define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@ define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
index ca893a488a0e..30860f90739a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 2 x i8> @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -56,7 +56,7 @@ define <vscale x 4 x i8> @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -78,7 +78,7 @@ define <vscale x 8 x i8> @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -100,7 +100,7 @@ define <vscale x 16 x i8> @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -122,7 +122,7 @@ define <vscale x 32 x i8> @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -144,7 +144,7 @@ define <vscale x 64 x i8> @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -166,7 +166,7 @@ define <vscale x 1 x i16> @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -188,7 +188,7 @@ define <vscale x 2 x i16> @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -210,7 +210,7 @@ define <vscale x 4 x i16> @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -232,7 +232,7 @@ define <vscale x 8 x i16> @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -254,7 +254,7 @@ define <vscale x 16 x i16> @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -276,7 +276,7 @@ define <vscale x 32 x i16> @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x i32> @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x i32> @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -342,7 +342,7 @@ define <vscale x 4 x i32> @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -364,7 +364,7 @@ define <vscale x 8 x i32> @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -386,7 +386,7 @@ define <vscale x 16 x i32> @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -408,7 +408,7 @@ define <vscale x 1 x i64> @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -430,7 +430,7 @@ define <vscale x 2 x i64> @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -452,7 +452,7 @@ define <vscale x 4 x i64> @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i64> @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -496,7 +496,7 @@ define <vscale x 1 x i8> @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -518,7 +518,7 @@ define <vscale x 2 x i8> @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -540,7 +540,7 @@ define <vscale x 4 x i8> @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -562,7 +562,7 @@ define <vscale x 8 x i8> @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -584,7 +584,7 @@ define <vscale x 16 x i8> @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -606,7 +606,7 @@ define <vscale x 32 x i8> @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -628,7 +628,7 @@ define <vscale x 64 x i8> @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -650,7 +650,7 @@ define <vscale x 1 x i16> @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -672,7 +672,7 @@ define <vscale x 2 x i16> @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -694,7 +694,7 @@ define <vscale x 4 x i16> @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -716,7 +716,7 @@ define <vscale x 8 x i16> @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -738,7 +738,7 @@ define <vscale x 16 x i16> @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -760,7 +760,7 @@ define <vscale x 32 x i16> @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -782,7 +782,7 @@ define <vscale x 1 x i32> @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -804,7 +804,7 @@ define <vscale x 2 x i32> @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -826,7 +826,7 @@ define <vscale x 4 x i32> @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -848,7 +848,7 @@ define <vscale x 8 x i32> @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -870,7 +870,7 @@ define <vscale x 16 x i32> @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -898,7 +898,7 @@ define <vscale x 1 x i64> @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v25, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -926,7 +926,7 @@ define <vscale x 2 x i64> @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v26, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -954,7 +954,7 @@ define <vscale x 4 x i64> @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v28, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -982,7 +982,7 @@ define <vscale x 8 x i64> @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll
index 03c88c225a85..2f0fe02957cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 2 x i8> @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -56,7 +56,7 @@ define <vscale x 4 x i8> @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -78,7 +78,7 @@ define <vscale x 8 x i8> @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -100,7 +100,7 @@ define <vscale x 16 x i8> @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -122,7 +122,7 @@ define <vscale x 32 x i8> @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -144,7 +144,7 @@ define <vscale x 64 x i8> @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -166,7 +166,7 @@ define <vscale x 1 x i16> @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -188,7 +188,7 @@ define <vscale x 2 x i16> @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -210,7 +210,7 @@ define <vscale x 4 x i16> @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -232,7 +232,7 @@ define <vscale x 8 x i16> @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -254,7 +254,7 @@ define <vscale x 16 x i16> @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -276,7 +276,7 @@ define <vscale x 32 x i16> @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -298,7 +298,7 @@ define <vscale x 1 x i32> @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -320,7 +320,7 @@ define <vscale x 2 x i32> @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -342,7 +342,7 @@ define <vscale x 4 x i32> @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -364,7 +364,7 @@ define <vscale x 8 x i32> @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -386,7 +386,7 @@ define <vscale x 16 x i32> @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -408,7 +408,7 @@ define <vscale x 1 x i64> @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -430,7 +430,7 @@ define <vscale x 2 x i64> @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -452,7 +452,7 @@ define <vscale x 4 x i64> @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i64> @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -496,7 +496,7 @@ define <vscale x 1 x i8> @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -518,7 +518,7 @@ define <vscale x 2 x i8> @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -540,7 +540,7 @@ define <vscale x 4 x i8> @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -562,7 +562,7 @@ define <vscale x 8 x i8> @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -584,7 +584,7 @@ define <vscale x 16 x i8> @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -606,7 +606,7 @@ define <vscale x 32 x i8> @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -628,7 +628,7 @@ define <vscale x 64 x i8> @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -650,7 +650,7 @@ define <vscale x 1 x i16> @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -672,7 +672,7 @@ define <vscale x 2 x i16> @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -694,7 +694,7 @@ define <vscale x 4 x i16> @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -716,7 +716,7 @@ define <vscale x 8 x i16> @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -738,7 +738,7 @@ define <vscale x 16 x i16> @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -760,7 +760,7 @@ define <vscale x 32 x i16> @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -782,7 +782,7 @@ define <vscale x 1 x i32> @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -804,7 +804,7 @@ define <vscale x 2 x i32> @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -826,7 +826,7 @@ define <vscale x 4 x i32> @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -848,7 +848,7 @@ define <vscale x 8 x i32> @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -870,7 +870,7 @@ define <vscale x 16 x i32> @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -892,7 +892,7 @@ define <vscale x 1 x i64> @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -914,7 +914,7 @@ define <vscale x 2 x i64> @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -936,7 +936,7 @@ define <vscale x 4 x i64> @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -958,7 +958,7 @@ define <vscale x 8 x i64> @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll
index d82124ecf86e..1d3bfa11302e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \
 ; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare void @llvm.riscv.vse.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
@@ -12,7 +12,7 @@ define void @intrinsic_vse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -33,7 +33,7 @@ define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -54,7 +54,7 @@ define void @intrinsic_vse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -75,7 +75,7 @@ define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -96,7 +96,7 @@ define void @intrinsic_vse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -117,7 +117,7 @@ define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -138,7 +138,7 @@ define void @intrinsic_vse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -159,7 +159,7 @@ define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -180,7 +180,7 @@ define void @intrinsic_vse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1f64(
     <vscale x 1 x double> %0,
@@ -201,7 +201,7 @@ define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -222,7 +222,7 @@ define void @intrinsic_vse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2f64(
     <vscale x 2 x double> %0,
@@ -243,7 +243,7 @@ define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -264,7 +264,7 @@ define void @intrinsic_vse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4f64(
     <vscale x 4 x double> %0,
@@ -285,7 +285,7 @@ define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -306,7 +306,7 @@ define void @intrinsic_vse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8f64(
     <vscale x 8 x double> %0,
@@ -327,7 +327,7 @@ define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -348,7 +348,7 @@ define void @intrinsic_vse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -369,7 +369,7 @@ define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -390,7 +390,7 @@ define void @intrinsic_vse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -411,7 +411,7 @@ define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -432,7 +432,7 @@ define void @intrinsic_vse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -453,7 +453,7 @@ define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -474,7 +474,7 @@ define void @intrinsic_vse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -495,7 +495,7 @@ define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -516,7 +516,7 @@ define void @intrinsic_vse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -537,7 +537,7 @@ define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -558,7 +558,7 @@ define void @intrinsic_vse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1f32(
     <vscale x 1 x float> %0,
@@ -579,7 +579,7 @@ define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -600,7 +600,7 @@ define void @intrinsic_vse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2f32(
     <vscale x 2 x float> %0,
@@ -621,7 +621,7 @@ define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -642,7 +642,7 @@ define void @intrinsic_vse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4f32(
     <vscale x 4 x float> %0,
@@ -663,7 +663,7 @@ define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -684,7 +684,7 @@ define void @intrinsic_vse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8f32(
     <vscale x 8 x float> %0,
@@ -705,7 +705,7 @@ define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -726,7 +726,7 @@ define void @intrinsic_vse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16f32(
     <vscale x 16 x float> %0,
@@ -747,7 +747,7 @@ define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -768,7 +768,7 @@ define void @intrinsic_vse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -789,7 +789,7 @@ define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -810,7 +810,7 @@ define void @intrinsic_vse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -831,7 +831,7 @@ define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -852,7 +852,7 @@ define void @intrinsic_vse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -873,7 +873,7 @@ define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -894,7 +894,7 @@ define void @intrinsic_vse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -915,7 +915,7 @@ define void @intrinsic_vse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -936,7 +936,7 @@ define void @intrinsic_vse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -957,7 +957,7 @@ define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -978,7 +978,7 @@ define void @intrinsic_vse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -999,7 +999,7 @@ define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1020,7 +1020,7 @@ define void @intrinsic_vse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1041,7 +1041,7 @@ define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1062,7 +1062,7 @@ define void @intrinsic_vse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1083,7 +1083,7 @@ define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1104,7 +1104,7 @@ define void @intrinsic_vse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1125,7 +1125,7 @@ define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1146,7 +1146,7 @@ define void @intrinsic_vse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1167,7 +1167,7 @@ define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1188,7 +1188,7 @@ define void @intrinsic_vse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1209,7 +1209,7 @@ define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1230,7 +1230,7 @@ define void @intrinsic_vse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1251,7 +1251,7 @@ define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1272,7 +1272,7 @@ define void @intrinsic_vse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1293,7 +1293,7 @@ define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1314,7 +1314,7 @@ define void @intrinsic_vse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1335,7 +1335,7 @@ define void @intrinsic_vse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1356,7 +1356,7 @@ define void @intrinsic_vse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1377,7 +1377,7 @@ define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1398,7 +1398,7 @@ define void @intrinsic_vse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1419,7 +1419,7 @@ define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1440,7 +1440,7 @@ define void @intrinsic_vse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1461,7 +1461,7 @@ define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1482,7 +1482,7 @@ define void @intrinsic_vse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1503,7 +1503,7 @@ define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1524,7 +1524,7 @@ define void @intrinsic_vse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1545,7 +1545,7 @@ define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv64i8(
     <vscale x 64 x i8> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll
index 048c8941110a..801b2d1371fd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \
 ; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare void @llvm.riscv.vse.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
@@ -12,7 +12,7 @@ define void @intrinsic_vse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -33,7 +33,7 @@ define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -54,7 +54,7 @@ define void @intrinsic_vse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -75,7 +75,7 @@ define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -96,7 +96,7 @@ define void @intrinsic_vse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -117,7 +117,7 @@ define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -138,7 +138,7 @@ define void @intrinsic_vse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -159,7 +159,7 @@ define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -180,7 +180,7 @@ define void @intrinsic_vse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1f64(
     <vscale x 1 x double> %0,
@@ -201,7 +201,7 @@ define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -222,7 +222,7 @@ define void @intrinsic_vse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2f64(
     <vscale x 2 x double> %0,
@@ -243,7 +243,7 @@ define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -264,7 +264,7 @@ define void @intrinsic_vse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4f64(
     <vscale x 4 x double> %0,
@@ -285,7 +285,7 @@ define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -306,7 +306,7 @@ define void @intrinsic_vse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8f64(
     <vscale x 8 x double> %0,
@@ -327,7 +327,7 @@ define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -348,7 +348,7 @@ define void @intrinsic_vse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -369,7 +369,7 @@ define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -390,7 +390,7 @@ define void @intrinsic_vse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -411,7 +411,7 @@ define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -432,7 +432,7 @@ define void @intrinsic_vse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -453,7 +453,7 @@ define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -474,7 +474,7 @@ define void @intrinsic_vse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -495,7 +495,7 @@ define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -516,7 +516,7 @@ define void @intrinsic_vse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -537,7 +537,7 @@ define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -558,7 +558,7 @@ define void @intrinsic_vse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1f32(
     <vscale x 1 x float> %0,
@@ -579,7 +579,7 @@ define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -600,7 +600,7 @@ define void @intrinsic_vse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2f32(
     <vscale x 2 x float> %0,
@@ -621,7 +621,7 @@ define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -642,7 +642,7 @@ define void @intrinsic_vse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4f32(
     <vscale x 4 x float> %0,
@@ -663,7 +663,7 @@ define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -684,7 +684,7 @@ define void @intrinsic_vse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8f32(
     <vscale x 8 x float> %0,
@@ -705,7 +705,7 @@ define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -726,7 +726,7 @@ define void @intrinsic_vse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16f32(
     <vscale x 16 x float> %0,
@@ -747,7 +747,7 @@ define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -768,7 +768,7 @@ define void @intrinsic_vse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -789,7 +789,7 @@ define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -810,7 +810,7 @@ define void @intrinsic_vse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -831,7 +831,7 @@ define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -852,7 +852,7 @@ define void @intrinsic_vse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -873,7 +873,7 @@ define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -894,7 +894,7 @@ define void @intrinsic_vse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -915,7 +915,7 @@ define void @intrinsic_vse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -936,7 +936,7 @@ define void @intrinsic_vse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -957,7 +957,7 @@ define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -978,7 +978,7 @@ define void @intrinsic_vse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -999,7 +999,7 @@ define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1020,7 +1020,7 @@ define void @intrinsic_vse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1041,7 +1041,7 @@ define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1062,7 +1062,7 @@ define void @intrinsic_vse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1083,7 +1083,7 @@ define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1104,7 +1104,7 @@ define void @intrinsic_vse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1125,7 +1125,7 @@ define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1146,7 +1146,7 @@ define void @intrinsic_vse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1167,7 +1167,7 @@ define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1188,7 +1188,7 @@ define void @intrinsic_vse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1209,7 +1209,7 @@ define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1230,7 +1230,7 @@ define void @intrinsic_vse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1251,7 +1251,7 @@ define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1272,7 +1272,7 @@ define void @intrinsic_vse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1293,7 +1293,7 @@ define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1314,7 +1314,7 @@ define void @intrinsic_vse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1335,7 +1335,7 @@ define void @intrinsic_vse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1356,7 +1356,7 @@ define void @intrinsic_vse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1377,7 +1377,7 @@ define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1398,7 +1398,7 @@ define void @intrinsic_vse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1419,7 +1419,7 @@ define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1440,7 +1440,7 @@ define void @intrinsic_vse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1461,7 +1461,7 @@ define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1482,7 +1482,7 @@ define void @intrinsic_vse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1503,7 +1503,7 @@ define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1524,7 +1524,7 @@ define void @intrinsic_vse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1545,7 +1545,7 @@ define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv64i8(
     <vscale x 64 x i8> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll
index d94125d7e6ad..e07a48dc085e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 
 declare void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>*, i32);
 
@@ -9,7 +9,7 @@ define void @intrinsic_vse1_v_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, i32 %2)
   ret void
@@ -22,7 +22,7 @@ define void @intrinsic_vse1_v_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, i32 %2)
   ret void
@@ -35,7 +35,7 @@ define void @intrinsic_vse1_v_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, i32 %2)
   ret void
@@ -48,7 +48,7 @@ define void @intrinsic_vse1_v_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, i32 %2)
   ret void
@@ -61,7 +61,7 @@ define void @intrinsic_vse1_v_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, i32 %2)
   ret void
@@ -74,7 +74,7 @@ define void @intrinsic_vse1_v_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, i32 %2)
   ret void
@@ -87,7 +87,7 @@ define void @intrinsic_vse1_v_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, i32 %2)
   ret void

diff --git a/llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll
index 48d4585c01cd..a1dd8a8a4d13 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 
 declare void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>*, i64);
 
@@ -9,7 +9,7 @@ define void @intrinsic_vse1_v_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, i64 %2)
   ret void
@@ -22,7 +22,7 @@ define void @intrinsic_vse1_v_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, i64 %2)
   ret void
@@ -35,7 +35,7 @@ define void @intrinsic_vse1_v_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, i64 %2)
   ret void
@@ -48,7 +48,7 @@ define void @intrinsic_vse1_v_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, i64 %2)
   ret void
@@ -61,7 +61,7 @@ define void @intrinsic_vse1_v_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, i64 %2)
   ret void
@@ -74,7 +74,7 @@ define void @intrinsic_vse1_v_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, i64 %2)
   ret void
@@ -87,7 +87,7 @@ define void @intrinsic_vse1_v_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, i64 %2)
   ret void

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll
index aead67d06b1d..77b681eecbc5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
   <vscale x 1 x i8>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x i64> @intrinsic_vsext_vf8_nxv1i64(<vscale x 1 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf8 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i64> @intrinsic_vsext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf8 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %1,
@@ -52,7 +52,7 @@ define <vscale x 2 x i64> @intrinsic_vsext_vf8_nxv2i64(<vscale x 2 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf8 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i64> @intrinsic_vsext_mask_vf8_nxv2i64(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf8 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %1,
@@ -93,7 +93,7 @@ define <vscale x 4 x i64> @intrinsic_vsext_vf8_nxv4i64(<vscale x 4 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf8 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i64> @intrinsic_vsext_mask_vf8_nxv4i64(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf8 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %1,
@@ -134,7 +134,7 @@ define <vscale x 8 x i64> @intrinsic_vsext_vf8_nxv8i64(<vscale x 8 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf8 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i64> @intrinsic_vsext_mask_vf8_nxv8i64(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf8 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %1,
@@ -175,7 +175,7 @@ define <vscale x 1 x i64> @intrinsic_vsext_vf4_nxv1i64(<vscale x 1 x i16> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -195,7 +195,7 @@ define <vscale x 1 x i64> @intrinsic_vsext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %1,
@@ -216,7 +216,7 @@ define <vscale x 2 x i64> @intrinsic_vsext_vf4_nxv2i64(<vscale x 2 x i16> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -236,7 +236,7 @@ define <vscale x 2 x i64> @intrinsic_vsext_mask_vf4_nxv2i64(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %1,
@@ -257,7 +257,7 @@ define <vscale x 4 x i64> @intrinsic_vsext_vf4_nxv4i64(<vscale x 4 x i16> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -277,7 +277,7 @@ define <vscale x 4 x i64> @intrinsic_vsext_mask_vf4_nxv4i64(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %1,
@@ -298,7 +298,7 @@ define <vscale x 8 x i64> @intrinsic_vsext_vf4_nxv8i64(<vscale x 8 x i16> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -318,7 +318,7 @@ define <vscale x 8 x i64> @intrinsic_vsext_mask_vf4_nxv8i64(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %1,
@@ -339,7 +339,7 @@ define <vscale x 1 x i32> @intrinsic_vsext_vf4_nxv1i32(<vscale x 1 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -359,7 +359,7 @@ define <vscale x 1 x i32> @intrinsic_vsext_mask_vf4_nxv1i32(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %1,
@@ -380,7 +380,7 @@ define <vscale x 2 x i32> @intrinsic_vsext_vf4_nxv2i32(<vscale x 2 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -400,7 +400,7 @@ define <vscale x 2 x i32> @intrinsic_vsext_mask_vf4_nxv2i32(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %1,
@@ -421,7 +421,7 @@ define <vscale x 4 x i32> @intrinsic_vsext_vf4_nxv4i32(<vscale x 4 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -441,7 +441,7 @@ define <vscale x 4 x i32> @intrinsic_vsext_mask_vf4_nxv4i32(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %1,
@@ -462,7 +462,7 @@ define <vscale x 8 x i32> @intrinsic_vsext_vf4_nxv8i32(<vscale x 8 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -482,7 +482,7 @@ define <vscale x 8 x i32> @intrinsic_vsext_mask_vf4_nxv8i32(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %1,
@@ -503,7 +503,7 @@ define <vscale x 16 x i32> @intrinsic_vsext_vf4_nxv16i32(<vscale x 16 x i8> %0,
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -523,7 +523,7 @@ define <vscale x 16 x i32> @intrinsic_vsext_mask_vf4_nxv16i32(<vscale x 16 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %1,
@@ -544,7 +544,7 @@ define <vscale x 1 x i32> @intrinsic_vsext_vf2_nxv1i32(<vscale x 1 x i16> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -564,7 +564,7 @@ define <vscale x 1 x i32> @intrinsic_vsext_mask_vf2_nxv1i32(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %1,
@@ -585,7 +585,7 @@ define <vscale x 2 x i32> @intrinsic_vsext_vf2_nxv2i32(<vscale x 2 x i16> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -605,7 +605,7 @@ define <vscale x 2 x i32> @intrinsic_vsext_mask_vf2_nxv2i32(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %1,
@@ -626,7 +626,7 @@ define <vscale x 4 x i32> @intrinsic_vsext_vf2_nxv4i32(<vscale x 4 x i16> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -646,7 +646,7 @@ define <vscale x 4 x i32> @intrinsic_vsext_mask_vf2_nxv4i32(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %1,
@@ -667,7 +667,7 @@ define <vscale x 8 x i32> @intrinsic_vsext_vf2_nxv8i32(<vscale x 8 x i16> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -687,7 +687,7 @@ define <vscale x 8 x i32> @intrinsic_vsext_mask_vf2_nxv8i32(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %1,
@@ -708,7 +708,7 @@ define <vscale x 16 x i32> @intrinsic_vsext_vf2_nxv16i32(<vscale x 16 x i16> %0,
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -728,7 +728,7 @@ define <vscale x 16 x i32> @intrinsic_vsext_mask_vf2_nxv16i32(<vscale x 16 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %1,
@@ -749,7 +749,7 @@ define <vscale x 1 x i16> @intrinsic_vsext_vf2_nxv1i16(<vscale x 1 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -769,7 +769,7 @@ define <vscale x 1 x i16> @intrinsic_vsext_mask_vf2_nxv1i16(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %1,
@@ -790,7 +790,7 @@ define <vscale x 2 x i16> @intrinsic_vsext_vf2_nxv2i16(<vscale x 2 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -810,7 +810,7 @@ define <vscale x 2 x i16> @intrinsic_vsext_mask_vf2_nxv2i16(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %1,
@@ -831,7 +831,7 @@ define <vscale x 4 x i16> @intrinsic_vsext_vf2_nxv4i16(<vscale x 4 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -851,7 +851,7 @@ define <vscale x 4 x i16> @intrinsic_vsext_mask_vf2_nxv4i16(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %1,
@@ -872,7 +872,7 @@ define <vscale x 8 x i16> @intrinsic_vsext_vf2_nxv8i16(<vscale x 8 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -892,7 +892,7 @@ define <vscale x 8 x i16> @intrinsic_vsext_mask_vf2_nxv8i16(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %1,
@@ -913,7 +913,7 @@ define <vscale x 16 x i16> @intrinsic_vsext_vf2_nxv16i16(<vscale x 16 x i8> %0,
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -933,7 +933,7 @@ define <vscale x 16 x i16> @intrinsic_vsext_mask_vf2_nxv16i16(<vscale x 16 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %1,
@@ -954,7 +954,7 @@ define <vscale x 32 x i16> @intrinsic_vsext_vf2_nxv32i16(<vscale x 32 x i8> %0,
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -974,7 +974,7 @@ define <vscale x 32 x i16> @intrinsic_vsext_mask_vf2_nxv32i16(<vscale x 32 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %1,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll
index 6014e2731be7..96b9fd82400a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
   <vscale x 1 x i8>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x i64> @intrinsic_vsext_vf8_nxv1i64(<vscale x 1 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf8 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i64> @intrinsic_vsext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf8 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %1,
@@ -52,7 +52,7 @@ define <vscale x 2 x i64> @intrinsic_vsext_vf8_nxv2i64(<vscale x 2 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf8 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i64> @intrinsic_vsext_mask_vf8_nxv2i64(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf8 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %1,
@@ -93,7 +93,7 @@ define <vscale x 4 x i64> @intrinsic_vsext_vf8_nxv4i64(<vscale x 4 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf8 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i64> @intrinsic_vsext_mask_vf8_nxv4i64(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf8 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %1,
@@ -134,7 +134,7 @@ define <vscale x 8 x i64> @intrinsic_vsext_vf8_nxv8i64(<vscale x 8 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf8 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i64> @intrinsic_vsext_mask_vf8_nxv8i64(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf8 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %1,
@@ -175,7 +175,7 @@ define <vscale x 1 x i64> @intrinsic_vsext_vf4_nxv1i64(<vscale x 1 x i16> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -195,7 +195,7 @@ define <vscale x 1 x i64> @intrinsic_vsext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %1,
@@ -216,7 +216,7 @@ define <vscale x 2 x i64> @intrinsic_vsext_vf4_nxv2i64(<vscale x 2 x i16> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -236,7 +236,7 @@ define <vscale x 2 x i64> @intrinsic_vsext_mask_vf4_nxv2i64(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %1,
@@ -257,7 +257,7 @@ define <vscale x 4 x i64> @intrinsic_vsext_vf4_nxv4i64(<vscale x 4 x i16> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -277,7 +277,7 @@ define <vscale x 4 x i64> @intrinsic_vsext_mask_vf4_nxv4i64(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %1,
@@ -298,7 +298,7 @@ define <vscale x 8 x i64> @intrinsic_vsext_vf4_nxv8i64(<vscale x 8 x i16> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -318,7 +318,7 @@ define <vscale x 8 x i64> @intrinsic_vsext_mask_vf4_nxv8i64(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %1,
@@ -339,7 +339,7 @@ define <vscale x 1 x i32> @intrinsic_vsext_vf4_nxv1i32(<vscale x 1 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -359,7 +359,7 @@ define <vscale x 1 x i32> @intrinsic_vsext_mask_vf4_nxv1i32(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %1,
@@ -380,7 +380,7 @@ define <vscale x 2 x i32> @intrinsic_vsext_vf4_nxv2i32(<vscale x 2 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -400,7 +400,7 @@ define <vscale x 2 x i32> @intrinsic_vsext_mask_vf4_nxv2i32(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %1,
@@ -421,7 +421,7 @@ define <vscale x 4 x i32> @intrinsic_vsext_vf4_nxv4i32(<vscale x 4 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -441,7 +441,7 @@ define <vscale x 4 x i32> @intrinsic_vsext_mask_vf4_nxv4i32(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %1,
@@ -462,7 +462,7 @@ define <vscale x 8 x i32> @intrinsic_vsext_vf4_nxv8i32(<vscale x 8 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -482,7 +482,7 @@ define <vscale x 8 x i32> @intrinsic_vsext_mask_vf4_nxv8i32(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %1,
@@ -503,7 +503,7 @@ define <vscale x 16 x i32> @intrinsic_vsext_vf4_nxv16i32(<vscale x 16 x i8> %0,
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -523,7 +523,7 @@ define <vscale x 16 x i32> @intrinsic_vsext_mask_vf4_nxv16i32(<vscale x 16 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %1,
@@ -544,7 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vsext_vf2_nxv1i64(<vscale x 1 x i32> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 1 x i64> @intrinsic_vsext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %1,
@@ -585,7 +585,7 @@ define <vscale x 2 x i64> @intrinsic_vsext_vf2_nxv2i64(<vscale x 2 x i32> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -605,7 +605,7 @@ define <vscale x 2 x i64> @intrinsic_vsext_mask_vf2_nxv2i64(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %1,
@@ -626,7 +626,7 @@ define <vscale x 4 x i64> @intrinsic_vsext_vf2_nxv4i64(<vscale x 4 x i32> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -646,7 +646,7 @@ define <vscale x 4 x i64> @intrinsic_vsext_mask_vf2_nxv4i64(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %1,
@@ -667,7 +667,7 @@ define <vscale x 8 x i64> @intrinsic_vsext_vf2_nxv8i64(<vscale x 8 x i32> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@ define <vscale x 8 x i64> @intrinsic_vsext_mask_vf2_nxv8i64(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %1,
@@ -708,7 +708,7 @@ define <vscale x 1 x i32> @intrinsic_vsext_vf2_nxv1i32(<vscale x 1 x i16> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -728,7 +728,7 @@ define <vscale x 1 x i32> @intrinsic_vsext_mask_vf2_nxv1i32(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %1,
@@ -749,7 +749,7 @@ define <vscale x 2 x i32> @intrinsic_vsext_vf2_nxv2i32(<vscale x 2 x i16> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -769,7 +769,7 @@ define <vscale x 2 x i32> @intrinsic_vsext_mask_vf2_nxv2i32(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %1,
@@ -790,7 +790,7 @@ define <vscale x 4 x i32> @intrinsic_vsext_vf2_nxv4i32(<vscale x 4 x i16> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -810,7 +810,7 @@ define <vscale x 4 x i32> @intrinsic_vsext_mask_vf2_nxv4i32(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %1,
@@ -831,7 +831,7 @@ define <vscale x 8 x i32> @intrinsic_vsext_vf2_nxv8i32(<vscale x 8 x i16> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -851,7 +851,7 @@ define <vscale x 8 x i32> @intrinsic_vsext_mask_vf2_nxv8i32(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %1,
@@ -872,7 +872,7 @@ define <vscale x 16 x i32> @intrinsic_vsext_vf2_nxv16i32(<vscale x 16 x i16> %0,
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -892,7 +892,7 @@ define <vscale x 16 x i32> @intrinsic_vsext_mask_vf2_nxv16i32(<vscale x 16 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %1,
@@ -913,7 +913,7 @@ define <vscale x 1 x i16> @intrinsic_vsext_vf2_nxv1i16(<vscale x 1 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -933,7 +933,7 @@ define <vscale x 1 x i16> @intrinsic_vsext_mask_vf2_nxv1i16(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %1,
@@ -954,7 +954,7 @@ define <vscale x 2 x i16> @intrinsic_vsext_vf2_nxv2i16(<vscale x 2 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -974,7 +974,7 @@ define <vscale x 2 x i16> @intrinsic_vsext_mask_vf2_nxv2i16(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %1,
@@ -995,7 +995,7 @@ define <vscale x 4 x i16> @intrinsic_vsext_vf2_nxv4i16(<vscale x 4 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1015,7 +1015,7 @@ define <vscale x 4 x i16> @intrinsic_vsext_mask_vf2_nxv4i16(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %1,
@@ -1036,7 +1036,7 @@ define <vscale x 8 x i16> @intrinsic_vsext_vf2_nxv8i16(<vscale x 8 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1056,7 +1056,7 @@ define <vscale x 8 x i16> @intrinsic_vsext_mask_vf2_nxv8i16(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %1,
@@ -1077,7 +1077,7 @@ define <vscale x 16 x i16> @intrinsic_vsext_vf2_nxv16i16(<vscale x 16 x i8> %0,
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1097,7 +1097,7 @@ define <vscale x 16 x i16> @intrinsic_vsext_mask_vf2_nxv16i16(<vscale x 16 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %1,
@@ -1118,7 +1118,7 @@ define <vscale x 32 x i16> @intrinsic_vsext_vf2_nxv32i16(<vscale x 32 x i8> %0,
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1138,7 +1138,7 @@ define <vscale x 32 x i16> @intrinsic_vsext_mask_vf2_nxv32i16(<vscale x 32 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %1,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
index b6cda065387e..da86323db3c2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -319,7 +319,7 @@ define <vscale x 1 x i16> @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -341,7 +341,7 @@ define <vscale x 1 x i16> @intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -363,7 +363,7 @@ define <vscale x 2 x i16> @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -385,7 +385,7 @@ define <vscale x 2 x i16> @intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -407,7 +407,7 @@ define <vscale x 4 x i16> @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -429,7 +429,7 @@ define <vscale x 4 x i16> @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -451,7 +451,7 @@ define <vscale x 8 x i16> @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -473,7 +473,7 @@ define <vscale x 8 x i16> @intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i16> @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -517,7 +517,7 @@ define <vscale x 16 x i16> @intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -539,7 +539,7 @@ define <vscale x 32 x i16> @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -561,7 +561,7 @@ define <vscale x 32 x i16> @intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -583,7 +583,7 @@ define <vscale x 1 x i32> @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -605,7 +605,7 @@ define <vscale x 1 x i32> @intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -627,7 +627,7 @@ define <vscale x 2 x i32> @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -649,7 +649,7 @@ define <vscale x 2 x i32> @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -671,7 +671,7 @@ define <vscale x 4 x i32> @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -693,7 +693,7 @@ define <vscale x 4 x i32> @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -715,7 +715,7 @@ define <vscale x 8 x i32> @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -737,7 +737,7 @@ define <vscale x 8 x i32> @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -759,7 +759,7 @@ define <vscale x 16 x i32> @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x i32> @intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -805,7 +805,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v25, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v25, a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -831,7 +831,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64(<vs
 ; CHECK-NEXT:    vslide1down.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v25, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -855,7 +855,7 @@ define <vscale x 2 x i64> @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v26, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v26, a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -881,7 +881,7 @@ define <vscale x 2 x i64> @intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64(<vs
 ; CHECK-NEXT:    vslide1down.vx v26, v26, a1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v26, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -905,7 +905,7 @@ define <vscale x 4 x i64> @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v28, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v28, a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -931,7 +931,7 @@ define <vscale x 4 x i64> @intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64(<vs
 ; CHECK-NEXT:    vslide1down.vx v28, v28, a1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v28, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -955,7 +955,7 @@ define <vscale x 8 x i64> @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -981,7 +981,7 @@ define <vscale x 8 x i64> @intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64(<vs
 ; CHECK-NEXT:    vslide1down.vx v16, v16, a1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll
index dccc604b6f4a..1f11be1d8ee0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -297,7 +297,7 @@ define <vscale x 64 x i8> @intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -319,7 +319,7 @@ define <vscale x 1 x i16> @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -341,7 +341,7 @@ define <vscale x 1 x i16> @intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -363,7 +363,7 @@ define <vscale x 2 x i16> @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -385,7 +385,7 @@ define <vscale x 2 x i16> @intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -407,7 +407,7 @@ define <vscale x 4 x i16> @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -429,7 +429,7 @@ define <vscale x 4 x i16> @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -451,7 +451,7 @@ define <vscale x 8 x i16> @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -473,7 +473,7 @@ define <vscale x 8 x i16> @intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i16> @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -517,7 +517,7 @@ define <vscale x 16 x i16> @intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -539,7 +539,7 @@ define <vscale x 32 x i16> @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -561,7 +561,7 @@ define <vscale x 32 x i16> @intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -583,7 +583,7 @@ define <vscale x 1 x i32> @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -605,7 +605,7 @@ define <vscale x 1 x i32> @intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -627,7 +627,7 @@ define <vscale x 2 x i32> @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -649,7 +649,7 @@ define <vscale x 2 x i32> @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -671,7 +671,7 @@ define <vscale x 4 x i32> @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -693,7 +693,7 @@ define <vscale x 4 x i32> @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -715,7 +715,7 @@ define <vscale x 8 x i32> @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -737,7 +737,7 @@ define <vscale x 8 x i32> @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -759,7 +759,7 @@ define <vscale x 16 x i32> @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -781,7 +781,7 @@ define <vscale x 16 x i32> @intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -803,7 +803,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -825,7 +825,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -847,7 +847,7 @@ define <vscale x 2 x i64> @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@ define <vscale x 2 x i64> @intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -891,7 +891,7 @@ define <vscale x 4 x i64> @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -913,7 +913,7 @@ define <vscale x 4 x i64> @intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -935,7 +935,7 @@ define <vscale x 8 x i64> @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -957,7 +957,7 @@ define <vscale x 8 x i64> @intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
index eb907eaad23e..5a1f0a457bf8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i8> @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i8> @intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i8> @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i8> @intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@ define <vscale x 64 x i8> @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -304,7 +304,7 @@ define <vscale x 64 x i8> @intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -327,7 +327,7 @@ define <vscale x 1 x i16> @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i16> @intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -372,7 +372,7 @@ define <vscale x 2 x i16> @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 2 x i16> @intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -417,7 +417,7 @@ define <vscale x 4 x i16> @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 8 x i16> @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 8 x i16> @intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -507,7 +507,7 @@ define <vscale x 16 x i16> @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -529,7 +529,7 @@ define <vscale x 16 x i16> @intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -552,7 +552,7 @@ define <vscale x 32 x i16> @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -574,7 +574,7 @@ define <vscale x 32 x i16> @intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -597,7 +597,7 @@ define <vscale x 1 x i32> @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -619,7 +619,7 @@ define <vscale x 1 x i32> @intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -642,7 +642,7 @@ define <vscale x 2 x i32> @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -664,7 +664,7 @@ define <vscale x 2 x i32> @intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -687,7 +687,7 @@ define <vscale x 4 x i32> @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -709,7 +709,7 @@ define <vscale x 4 x i32> @intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -732,7 +732,7 @@ define <vscale x 8 x i32> @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -754,7 +754,7 @@ define <vscale x 8 x i32> @intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -777,7 +777,7 @@ define <vscale x 16 x i32> @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -799,7 +799,7 @@ define <vscale x 16 x i32> @intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -823,7 +823,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a1
 ; CHECK-NEXT:    vslide1up.vx v8, v25, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -849,7 +849,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64(<vsca
 ; CHECK-NEXT:    vslide1up.vx v26, v25, a0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v26, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -873,7 +873,7 @@ define <vscale x 2 x i64> @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v26, v8, a1
 ; CHECK-NEXT:    vslide1up.vx v8, v26, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -899,7 +899,7 @@ define <vscale x 2 x i64> @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64(<vsca
 ; CHECK-NEXT:    vslide1up.vx v28, v26, a0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v28, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -923,7 +923,7 @@ define <vscale x 4 x i64> @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v28, v8, a1
 ; CHECK-NEXT:    vslide1up.vx v8, v28, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -949,7 +949,7 @@ define <vscale x 4 x i64> @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64(<vsca
 ; CHECK-NEXT:    vslide1up.vx v12, v28, a0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -973,7 +973,7 @@ define <vscale x 8 x i64> @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a1
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -999,7 +999,7 @@ define <vscale x 8 x i64> @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64(<vsca
 ; CHECK-NEXT:    vslide1up.vx v16, v24, a0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
index e69e4adb1cd1..830a7533cf8b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i8> @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i8> @intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i8> @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i8> @intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i8> @intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i8> @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i8> @intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i8> @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i8> @intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@ define <vscale x 64 x i8> @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -304,7 +304,7 @@ define <vscale x 64 x i8> @intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -327,7 +327,7 @@ define <vscale x 1 x i16> @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 1 x i16> @intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -372,7 +372,7 @@ define <vscale x 2 x i16> @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 2 x i16> @intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -417,7 +417,7 @@ define <vscale x 4 x i16> @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 4 x i16> @intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -462,7 +462,7 @@ define <vscale x 8 x i16> @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 8 x i16> @intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -507,7 +507,7 @@ define <vscale x 16 x i16> @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -529,7 +529,7 @@ define <vscale x 16 x i16> @intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -552,7 +552,7 @@ define <vscale x 32 x i16> @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -574,7 +574,7 @@ define <vscale x 32 x i16> @intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -597,7 +597,7 @@ define <vscale x 1 x i32> @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -619,7 +619,7 @@ define <vscale x 1 x i32> @intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -642,7 +642,7 @@ define <vscale x 2 x i32> @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -664,7 +664,7 @@ define <vscale x 2 x i32> @intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -687,7 +687,7 @@ define <vscale x 4 x i32> @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -709,7 +709,7 @@ define <vscale x 4 x i32> @intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -732,7 +732,7 @@ define <vscale x 8 x i32> @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -754,7 +754,7 @@ define <vscale x 8 x i32> @intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -777,7 +777,7 @@ define <vscale x 16 x i32> @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -799,7 +799,7 @@ define <vscale x 16 x i32> @intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -822,7 +822,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -844,7 +844,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -867,7 +867,7 @@ define <vscale x 2 x i64> @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -889,7 +889,7 @@ define <vscale x 2 x i64> @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -912,7 +912,7 @@ define <vscale x 4 x i64> @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -934,7 +934,7 @@ define <vscale x 4 x i64> @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -957,7 +957,7 @@ define <vscale x 8 x i64> @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -979,7 +979,7 @@ define <vscale x 8 x i64> @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
index a8aaee03fac8..a389a20be28a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@ define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -91,7 +91,7 @@ define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -114,7 +114,7 @@ define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -131,7 +131,7 @@ define <vscale x 2 x i8> @intrinsic_vslidedown_vi_nxv2i8_nxv2i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -170,7 +170,7 @@ define <vscale x 4 x i8> @intrinsic_vslidedown_vx_nxv4i8_nxv4i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -210,7 +210,7 @@ define <vscale x 4 x i8> @intrinsic_vslidedown_vi_nxv4i8_nxv4i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -226,7 +226,7 @@ define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -249,7 +249,7 @@ define <vscale x 8 x i8> @intrinsic_vslidedown_vx_nxv8i8_nxv8i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -272,7 +272,7 @@ define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -289,7 +289,7 @@ define <vscale x 8 x i8> @intrinsic_vslidedown_vi_nxv8i8_nxv8i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -305,7 +305,7 @@ define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -328,7 +328,7 @@ define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -351,7 +351,7 @@ define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -368,7 +368,7 @@ define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -384,7 +384,7 @@ define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -407,7 +407,7 @@ define <vscale x 32 x i8> @intrinsic_vslidedown_vx_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -430,7 +430,7 @@ define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -447,7 +447,7 @@ define <vscale x 32 x i8> @intrinsic_vslidedown_vi_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -463,7 +463,7 @@ define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -486,7 +486,7 @@ define <vscale x 1 x i16> @intrinsic_vslidedown_vx_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -509,7 +509,7 @@ define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -526,7 +526,7 @@ define <vscale x 1 x i16> @intrinsic_vslidedown_vi_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -542,7 +542,7 @@ define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -565,7 +565,7 @@ define <vscale x 2 x i16> @intrinsic_vslidedown_vx_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -588,7 +588,7 @@ define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -605,7 +605,7 @@ define <vscale x 2 x i16> @intrinsic_vslidedown_vi_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -621,7 +621,7 @@ define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -644,7 +644,7 @@ define <vscale x 4 x i16> @intrinsic_vslidedown_vx_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -667,7 +667,7 @@ define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -684,7 +684,7 @@ define <vscale x 4 x i16> @intrinsic_vslidedown_vi_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -700,7 +700,7 @@ define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -723,7 +723,7 @@ define <vscale x 8 x i16> @intrinsic_vslidedown_vx_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -746,7 +746,7 @@ define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -763,7 +763,7 @@ define <vscale x 8 x i16> @intrinsic_vslidedown_vi_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -779,7 +779,7 @@ define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -802,7 +802,7 @@ define <vscale x 16 x i16> @intrinsic_vslidedown_vx_nxv16i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -825,7 +825,7 @@ define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -842,7 +842,7 @@ define <vscale x 16 x i16> @intrinsic_vslidedown_vi_nxv16i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -858,7 +858,7 @@ define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -881,7 +881,7 @@ define <vscale x 1 x i32> @intrinsic_vslidedown_vx_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -904,7 +904,7 @@ define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -921,7 +921,7 @@ define <vscale x 1 x i32> @intrinsic_vslidedown_vi_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -937,7 +937,7 @@ define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -960,7 +960,7 @@ define <vscale x 2 x i32> @intrinsic_vslidedown_vx_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -983,7 +983,7 @@ define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1000,7 +1000,7 @@ define <vscale x 2 x i32> @intrinsic_vslidedown_vi_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1016,7 +1016,7 @@ define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1039,7 +1039,7 @@ define <vscale x 4 x i32> @intrinsic_vslidedown_vx_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1079,7 +1079,7 @@ define <vscale x 4 x i32> @intrinsic_vslidedown_vi_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1095,7 +1095,7 @@ define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1118,7 +1118,7 @@ define <vscale x 8 x i32> @intrinsic_vslidedown_vx_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 8 x i32> @intrinsic_vslidedown_vi_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1174,7 +1174,7 @@ define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1197,7 +1197,7 @@ define <vscale x 1 x i64> @intrinsic_vslidedown_vx_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1220,7 +1220,7 @@ define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1237,7 +1237,7 @@ define <vscale x 1 x i64> @intrinsic_vslidedown_vi_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1253,7 +1253,7 @@ define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i64> @intrinsic_vslidedown_vx_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1299,7 +1299,7 @@ define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1316,7 +1316,7 @@ define <vscale x 2 x i64> @intrinsic_vslidedown_vi_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1332,7 +1332,7 @@ define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1355,7 +1355,7 @@ define <vscale x 4 x i64> @intrinsic_vslidedown_vx_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1378,7 +1378,7 @@ define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 4 x i64> @intrinsic_vslidedown_vi_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1411,7 +1411,7 @@ define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1434,7 +1434,7 @@ define <vscale x 1 x half> @intrinsic_vslidedown_vx_nxv1f16_nxv1f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1457,7 +1457,7 @@ define <vscale x 1 x half> @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1474,7 +1474,7 @@ define <vscale x 1 x half> @intrinsic_vslidedown_vi_nxv1f16_nxv1f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1490,7 +1490,7 @@ define <vscale x 1 x half> @intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1513,7 +1513,7 @@ define <vscale x 2 x half> @intrinsic_vslidedown_vx_nxv2f16_nxv2f16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x half> @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1553,7 +1553,7 @@ define <vscale x 2 x half> @intrinsic_vslidedown_vi_nxv2f16_nxv2f16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1569,7 +1569,7 @@ define <vscale x 2 x half> @intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1592,7 +1592,7 @@ define <vscale x 4 x half> @intrinsic_vslidedown_vx_nxv4f16_nxv4f16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1615,7 +1615,7 @@ define <vscale x 4 x half> @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1632,7 +1632,7 @@ define <vscale x 4 x half> @intrinsic_vslidedown_vi_nxv4f16_nxv4f16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1648,7 +1648,7 @@ define <vscale x 4 x half> @intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1671,7 +1671,7 @@ define <vscale x 8 x half> @intrinsic_vslidedown_vx_nxv8f16_nxv8f16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1694,7 +1694,7 @@ define <vscale x 8 x half> @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1711,7 +1711,7 @@ define <vscale x 8 x half> @intrinsic_vslidedown_vi_nxv8f16_nxv8f16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1727,7 +1727,7 @@ define <vscale x 8 x half> @intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1750,7 +1750,7 @@ define <vscale x 16 x half> @intrinsic_vslidedown_vx_nxv16f16_nxv16f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1773,7 +1773,7 @@ define <vscale x 16 x half> @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1790,7 +1790,7 @@ define <vscale x 16 x half> @intrinsic_vslidedown_vi_nxv16f16_nxv16f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1806,7 +1806,7 @@ define <vscale x 16 x half> @intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1829,7 +1829,7 @@ define <vscale x 1 x float> @intrinsic_vslidedown_vx_nxv1f32_nxv1f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1852,7 +1852,7 @@ define <vscale x 1 x float> @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1869,7 +1869,7 @@ define <vscale x 1 x float> @intrinsic_vslidedown_vi_nxv1f32_nxv1f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 1 x float> @intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1908,7 +1908,7 @@ define <vscale x 2 x float> @intrinsic_vslidedown_vx_nxv2f32_nxv2f32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1931,7 +1931,7 @@ define <vscale x 2 x float> @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1948,7 +1948,7 @@ define <vscale x 2 x float> @intrinsic_vslidedown_vi_nxv2f32_nxv2f32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1964,7 +1964,7 @@ define <vscale x 2 x float> @intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1987,7 +1987,7 @@ define <vscale x 4 x float> @intrinsic_vslidedown_vx_nxv4f32_nxv4f32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x float> @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2027,7 +2027,7 @@ define <vscale x 4 x float> @intrinsic_vslidedown_vi_nxv4f32_nxv4f32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2043,7 +2043,7 @@ define <vscale x 4 x float> @intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2066,7 +2066,7 @@ define <vscale x 8 x float> @intrinsic_vslidedown_vx_nxv8f32_nxv8f32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 8 x float> @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 8 x float> @intrinsic_vslidedown_vi_nxv8f32_nxv8f32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2122,7 +2122,7 @@ define <vscale x 8 x float> @intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2145,7 +2145,7 @@ define <vscale x 1 x double> @intrinsic_vslidedown_vx_nxv1f64_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2168,7 +2168,7 @@ define <vscale x 1 x double> @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x double> @intrinsic_vslidedown_vi_nxv1f64_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2201,7 +2201,7 @@ define <vscale x 1 x double> @intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2224,7 +2224,7 @@ define <vscale x 2 x double> @intrinsic_vslidedown_vx_nxv2f64_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2247,7 +2247,7 @@ define <vscale x 2 x double> @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2264,7 +2264,7 @@ define <vscale x 2 x double> @intrinsic_vslidedown_vi_nxv2f64_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2280,7 +2280,7 @@ define <vscale x 2 x double> @intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2303,7 +2303,7 @@ define <vscale x 4 x double> @intrinsic_vslidedown_vx_nxv4f64_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2326,7 +2326,7 @@ define <vscale x 4 x double> @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2343,7 +2343,7 @@ define <vscale x 4 x double> @intrinsic_vslidedown_vi_nxv4f64_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2359,7 +2359,7 @@ define <vscale x 4 x double> @intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
     <vscale x 4 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
index 3588f07bd613..908419631552 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@ define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -91,7 +91,7 @@ define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -114,7 +114,7 @@ define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -131,7 +131,7 @@ define <vscale x 2 x i8> @intrinsic_vslidedown_vi_nxv2i8_nxv2i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -170,7 +170,7 @@ define <vscale x 4 x i8> @intrinsic_vslidedown_vx_nxv4i8_nxv4i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -210,7 +210,7 @@ define <vscale x 4 x i8> @intrinsic_vslidedown_vi_nxv4i8_nxv4i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -226,7 +226,7 @@ define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -249,7 +249,7 @@ define <vscale x 8 x i8> @intrinsic_vslidedown_vx_nxv8i8_nxv8i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -272,7 +272,7 @@ define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -289,7 +289,7 @@ define <vscale x 8 x i8> @intrinsic_vslidedown_vi_nxv8i8_nxv8i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -305,7 +305,7 @@ define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -328,7 +328,7 @@ define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -351,7 +351,7 @@ define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -368,7 +368,7 @@ define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -384,7 +384,7 @@ define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -407,7 +407,7 @@ define <vscale x 32 x i8> @intrinsic_vslidedown_vx_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -430,7 +430,7 @@ define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -447,7 +447,7 @@ define <vscale x 32 x i8> @intrinsic_vslidedown_vi_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -463,7 +463,7 @@ define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -486,7 +486,7 @@ define <vscale x 1 x i16> @intrinsic_vslidedown_vx_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -509,7 +509,7 @@ define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -526,7 +526,7 @@ define <vscale x 1 x i16> @intrinsic_vslidedown_vi_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -542,7 +542,7 @@ define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -565,7 +565,7 @@ define <vscale x 2 x i16> @intrinsic_vslidedown_vx_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -588,7 +588,7 @@ define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -605,7 +605,7 @@ define <vscale x 2 x i16> @intrinsic_vslidedown_vi_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -621,7 +621,7 @@ define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -644,7 +644,7 @@ define <vscale x 4 x i16> @intrinsic_vslidedown_vx_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -667,7 +667,7 @@ define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -684,7 +684,7 @@ define <vscale x 4 x i16> @intrinsic_vslidedown_vi_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -700,7 +700,7 @@ define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -723,7 +723,7 @@ define <vscale x 8 x i16> @intrinsic_vslidedown_vx_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -746,7 +746,7 @@ define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -763,7 +763,7 @@ define <vscale x 8 x i16> @intrinsic_vslidedown_vi_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -779,7 +779,7 @@ define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -802,7 +802,7 @@ define <vscale x 16 x i16> @intrinsic_vslidedown_vx_nxv16i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -825,7 +825,7 @@ define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -842,7 +842,7 @@ define <vscale x 16 x i16> @intrinsic_vslidedown_vi_nxv16i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -858,7 +858,7 @@ define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -881,7 +881,7 @@ define <vscale x 1 x i32> @intrinsic_vslidedown_vx_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -904,7 +904,7 @@ define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -921,7 +921,7 @@ define <vscale x 1 x i32> @intrinsic_vslidedown_vi_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -937,7 +937,7 @@ define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -960,7 +960,7 @@ define <vscale x 2 x i32> @intrinsic_vslidedown_vx_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -983,7 +983,7 @@ define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1000,7 +1000,7 @@ define <vscale x 2 x i32> @intrinsic_vslidedown_vi_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1016,7 +1016,7 @@ define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1039,7 +1039,7 @@ define <vscale x 4 x i32> @intrinsic_vslidedown_vx_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1079,7 +1079,7 @@ define <vscale x 4 x i32> @intrinsic_vslidedown_vi_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1095,7 +1095,7 @@ define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1118,7 +1118,7 @@ define <vscale x 8 x i32> @intrinsic_vslidedown_vx_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 8 x i32> @intrinsic_vslidedown_vi_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1174,7 +1174,7 @@ define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1197,7 +1197,7 @@ define <vscale x 1 x i64> @intrinsic_vslidedown_vx_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1220,7 +1220,7 @@ define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1237,7 +1237,7 @@ define <vscale x 1 x i64> @intrinsic_vslidedown_vi_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1253,7 +1253,7 @@ define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i64> @intrinsic_vslidedown_vx_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1299,7 +1299,7 @@ define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1316,7 +1316,7 @@ define <vscale x 2 x i64> @intrinsic_vslidedown_vi_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1332,7 +1332,7 @@ define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1355,7 +1355,7 @@ define <vscale x 4 x i64> @intrinsic_vslidedown_vx_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1378,7 +1378,7 @@ define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 4 x i64> @intrinsic_vslidedown_vi_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1411,7 +1411,7 @@ define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1434,7 +1434,7 @@ define <vscale x 1 x half> @intrinsic_vslidedown_vx_nxv1f16_nxv1f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1457,7 +1457,7 @@ define <vscale x 1 x half> @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1474,7 +1474,7 @@ define <vscale x 1 x half> @intrinsic_vslidedown_vi_nxv1f16_nxv1f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1490,7 +1490,7 @@ define <vscale x 1 x half> @intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1513,7 +1513,7 @@ define <vscale x 2 x half> @intrinsic_vslidedown_vx_nxv2f16_nxv2f16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x half> @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1553,7 +1553,7 @@ define <vscale x 2 x half> @intrinsic_vslidedown_vi_nxv2f16_nxv2f16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1569,7 +1569,7 @@ define <vscale x 2 x half> @intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1592,7 +1592,7 @@ define <vscale x 4 x half> @intrinsic_vslidedown_vx_nxv4f16_nxv4f16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1615,7 +1615,7 @@ define <vscale x 4 x half> @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1632,7 +1632,7 @@ define <vscale x 4 x half> @intrinsic_vslidedown_vi_nxv4f16_nxv4f16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1648,7 +1648,7 @@ define <vscale x 4 x half> @intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1671,7 +1671,7 @@ define <vscale x 8 x half> @intrinsic_vslidedown_vx_nxv8f16_nxv8f16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1694,7 +1694,7 @@ define <vscale x 8 x half> @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1711,7 +1711,7 @@ define <vscale x 8 x half> @intrinsic_vslidedown_vi_nxv8f16_nxv8f16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1727,7 +1727,7 @@ define <vscale x 8 x half> @intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1750,7 +1750,7 @@ define <vscale x 16 x half> @intrinsic_vslidedown_vx_nxv16f16_nxv16f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1773,7 +1773,7 @@ define <vscale x 16 x half> @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1790,7 +1790,7 @@ define <vscale x 16 x half> @intrinsic_vslidedown_vi_nxv16f16_nxv16f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1806,7 +1806,7 @@ define <vscale x 16 x half> @intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1829,7 +1829,7 @@ define <vscale x 1 x float> @intrinsic_vslidedown_vx_nxv1f32_nxv1f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1852,7 +1852,7 @@ define <vscale x 1 x float> @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1869,7 +1869,7 @@ define <vscale x 1 x float> @intrinsic_vslidedown_vi_nxv1f32_nxv1f32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 1 x float> @intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1908,7 +1908,7 @@ define <vscale x 2 x float> @intrinsic_vslidedown_vx_nxv2f32_nxv2f32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1931,7 +1931,7 @@ define <vscale x 2 x float> @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1948,7 +1948,7 @@ define <vscale x 2 x float> @intrinsic_vslidedown_vi_nxv2f32_nxv2f32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1964,7 +1964,7 @@ define <vscale x 2 x float> @intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1987,7 +1987,7 @@ define <vscale x 4 x float> @intrinsic_vslidedown_vx_nxv4f32_nxv4f32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x float> @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2027,7 +2027,7 @@ define <vscale x 4 x float> @intrinsic_vslidedown_vi_nxv4f32_nxv4f32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2043,7 +2043,7 @@ define <vscale x 4 x float> @intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2066,7 +2066,7 @@ define <vscale x 8 x float> @intrinsic_vslidedown_vx_nxv8f32_nxv8f32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 8 x float> @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 8 x float> @intrinsic_vslidedown_vi_nxv8f32_nxv8f32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2122,7 +2122,7 @@ define <vscale x 8 x float> @intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2145,7 +2145,7 @@ define <vscale x 1 x double> @intrinsic_vslidedown_vx_nxv1f64_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2168,7 +2168,7 @@ define <vscale x 1 x double> @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x double> @intrinsic_vslidedown_vi_nxv1f64_nxv1f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2201,7 +2201,7 @@ define <vscale x 1 x double> @intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2224,7 +2224,7 @@ define <vscale x 2 x double> @intrinsic_vslidedown_vx_nxv2f64_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2247,7 +2247,7 @@ define <vscale x 2 x double> @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2264,7 +2264,7 @@ define <vscale x 2 x double> @intrinsic_vslidedown_vi_nxv2f64_nxv2f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2280,7 +2280,7 @@ define <vscale x 2 x double> @intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2303,7 +2303,7 @@ define <vscale x 4 x double> @intrinsic_vslidedown_vx_nxv4f64_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2326,7 +2326,7 @@ define <vscale x 4 x double> @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2343,7 +2343,7 @@ define <vscale x 4 x double> @intrinsic_vslidedown_vi_nxv4f64_nxv4f64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2359,7 +2359,7 @@ define <vscale x 4 x double> @intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
     <vscale x 4 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
index 5188670f4106..7d9eb8e79549 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@ define <vscale x 1 x i8> @intrinsic_vslideup_vi_nxv1i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ define <vscale x 1 x i8> @intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -91,7 +91,7 @@ define <vscale x 2 x i8> @intrinsic_vslideup_vx_nxv2i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -114,7 +114,7 @@ define <vscale x 2 x i8> @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -131,7 +131,7 @@ define <vscale x 2 x i8> @intrinsic_vslideup_vi_nxv2i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 2 x i8> @intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -170,7 +170,7 @@ define <vscale x 4 x i8> @intrinsic_vslideup_vx_nxv4i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 4 x i8> @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -210,7 +210,7 @@ define <vscale x 4 x i8> @intrinsic_vslideup_vi_nxv4i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -226,7 +226,7 @@ define <vscale x 4 x i8> @intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -249,7 +249,7 @@ define <vscale x 8 x i8> @intrinsic_vslideup_vx_nxv8i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -272,7 +272,7 @@ define <vscale x 8 x i8> @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -289,7 +289,7 @@ define <vscale x 8 x i8> @intrinsic_vslideup_vi_nxv8i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -305,7 +305,7 @@ define <vscale x 8 x i8> @intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -328,7 +328,7 @@ define <vscale x 16 x i8> @intrinsic_vslideup_vx_nxv16i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -351,7 +351,7 @@ define <vscale x 16 x i8> @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -368,7 +368,7 @@ define <vscale x 16 x i8> @intrinsic_vslideup_vi_nxv16i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -384,7 +384,7 @@ define <vscale x 16 x i8> @intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -407,7 +407,7 @@ define <vscale x 32 x i8> @intrinsic_vslideup_vx_nxv32i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -430,7 +430,7 @@ define <vscale x 32 x i8> @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -447,7 +447,7 @@ define <vscale x 32 x i8> @intrinsic_vslideup_vi_nxv32i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -463,7 +463,7 @@ define <vscale x 32 x i8> @intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -486,7 +486,7 @@ define <vscale x 1 x i16> @intrinsic_vslideup_vx_nxv1i16_nxv1i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -509,7 +509,7 @@ define <vscale x 1 x i16> @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -526,7 +526,7 @@ define <vscale x 1 x i16> @intrinsic_vslideup_vi_nxv1i16_nxv1i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -542,7 +542,7 @@ define <vscale x 1 x i16> @intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -565,7 +565,7 @@ define <vscale x 2 x i16> @intrinsic_vslideup_vx_nxv2i16_nxv2i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -588,7 +588,7 @@ define <vscale x 2 x i16> @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -605,7 +605,7 @@ define <vscale x 2 x i16> @intrinsic_vslideup_vi_nxv2i16_nxv2i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -621,7 +621,7 @@ define <vscale x 2 x i16> @intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -644,7 +644,7 @@ define <vscale x 4 x i16> @intrinsic_vslideup_vx_nxv4i16_nxv4i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -667,7 +667,7 @@ define <vscale x 4 x i16> @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -684,7 +684,7 @@ define <vscale x 4 x i16> @intrinsic_vslideup_vi_nxv4i16_nxv4i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -700,7 +700,7 @@ define <vscale x 4 x i16> @intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -723,7 +723,7 @@ define <vscale x 8 x i16> @intrinsic_vslideup_vx_nxv8i16_nxv8i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -746,7 +746,7 @@ define <vscale x 8 x i16> @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -763,7 +763,7 @@ define <vscale x 8 x i16> @intrinsic_vslideup_vi_nxv8i16_nxv8i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -779,7 +779,7 @@ define <vscale x 8 x i16> @intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -802,7 +802,7 @@ define <vscale x 16 x i16> @intrinsic_vslideup_vx_nxv16i16_nxv16i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -825,7 +825,7 @@ define <vscale x 16 x i16> @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -842,7 +842,7 @@ define <vscale x 16 x i16> @intrinsic_vslideup_vi_nxv16i16_nxv16i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -858,7 +858,7 @@ define <vscale x 16 x i16> @intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -881,7 +881,7 @@ define <vscale x 1 x i32> @intrinsic_vslideup_vx_nxv1i32_nxv1i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -904,7 +904,7 @@ define <vscale x 1 x i32> @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -921,7 +921,7 @@ define <vscale x 1 x i32> @intrinsic_vslideup_vi_nxv1i32_nxv1i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -937,7 +937,7 @@ define <vscale x 1 x i32> @intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -960,7 +960,7 @@ define <vscale x 2 x i32> @intrinsic_vslideup_vx_nxv2i32_nxv2i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -983,7 +983,7 @@ define <vscale x 2 x i32> @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1000,7 +1000,7 @@ define <vscale x 2 x i32> @intrinsic_vslideup_vi_nxv2i32_nxv2i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1016,7 +1016,7 @@ define <vscale x 2 x i32> @intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1039,7 +1039,7 @@ define <vscale x 4 x i32> @intrinsic_vslideup_vx_nxv4i32_nxv4i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i32> @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1079,7 +1079,7 @@ define <vscale x 4 x i32> @intrinsic_vslideup_vi_nxv4i32_nxv4i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1095,7 +1095,7 @@ define <vscale x 4 x i32> @intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1118,7 +1118,7 @@ define <vscale x 8 x i32> @intrinsic_vslideup_vx_nxv8i32_nxv8i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 8 x i32> @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 8 x i32> @intrinsic_vslideup_vi_nxv8i32_nxv8i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1174,7 +1174,7 @@ define <vscale x 8 x i32> @intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1197,7 +1197,7 @@ define <vscale x 1 x i64> @intrinsic_vslideup_vx_nxv1i64_nxv1i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1220,7 +1220,7 @@ define <vscale x 1 x i64> @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1237,7 +1237,7 @@ define <vscale x 1 x i64> @intrinsic_vslideup_vi_nxv1i64_nxv1i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1253,7 +1253,7 @@ define <vscale x 1 x i64> @intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i64> @intrinsic_vslideup_vx_nxv2i64_nxv2i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1299,7 +1299,7 @@ define <vscale x 2 x i64> @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1316,7 +1316,7 @@ define <vscale x 2 x i64> @intrinsic_vslideup_vi_nxv2i64_nxv2i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1332,7 +1332,7 @@ define <vscale x 2 x i64> @intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1355,7 +1355,7 @@ define <vscale x 4 x i64> @intrinsic_vslideup_vx_nxv4i64_nxv4i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1378,7 +1378,7 @@ define <vscale x 4 x i64> @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 4 x i64> @intrinsic_vslideup_vi_nxv4i64_nxv4i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1411,7 +1411,7 @@ define <vscale x 4 x i64> @intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1434,7 +1434,7 @@ define <vscale x 1 x half> @intrinsic_vslideup_vx_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1457,7 +1457,7 @@ define <vscale x 1 x half> @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1474,7 +1474,7 @@ define <vscale x 1 x half> @intrinsic_vslideup_vi_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1490,7 +1490,7 @@ define <vscale x 1 x half> @intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1513,7 +1513,7 @@ define <vscale x 2 x half> @intrinsic_vslideup_vx_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x half> @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1553,7 +1553,7 @@ define <vscale x 2 x half> @intrinsic_vslideup_vi_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1569,7 +1569,7 @@ define <vscale x 2 x half> @intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1592,7 +1592,7 @@ define <vscale x 4 x half> @intrinsic_vslideup_vx_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1615,7 +1615,7 @@ define <vscale x 4 x half> @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1632,7 +1632,7 @@ define <vscale x 4 x half> @intrinsic_vslideup_vi_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1648,7 +1648,7 @@ define <vscale x 4 x half> @intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1671,7 +1671,7 @@ define <vscale x 8 x half> @intrinsic_vslideup_vx_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1694,7 +1694,7 @@ define <vscale x 8 x half> @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1711,7 +1711,7 @@ define <vscale x 8 x half> @intrinsic_vslideup_vi_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1727,7 +1727,7 @@ define <vscale x 8 x half> @intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1750,7 +1750,7 @@ define <vscale x 16 x half> @intrinsic_vslideup_vx_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1773,7 +1773,7 @@ define <vscale x 16 x half> @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1790,7 +1790,7 @@ define <vscale x 16 x half> @intrinsic_vslideup_vi_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1806,7 +1806,7 @@ define <vscale x 16 x half> @intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1829,7 +1829,7 @@ define <vscale x 1 x float> @intrinsic_vslideup_vx_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1852,7 +1852,7 @@ define <vscale x 1 x float> @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1869,7 +1869,7 @@ define <vscale x 1 x float> @intrinsic_vslideup_vi_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 1 x float> @intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1908,7 +1908,7 @@ define <vscale x 2 x float> @intrinsic_vslideup_vx_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1931,7 +1931,7 @@ define <vscale x 2 x float> @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1948,7 +1948,7 @@ define <vscale x 2 x float> @intrinsic_vslideup_vi_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1964,7 +1964,7 @@ define <vscale x 2 x float> @intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1987,7 +1987,7 @@ define <vscale x 4 x float> @intrinsic_vslideup_vx_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x float> @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2027,7 +2027,7 @@ define <vscale x 4 x float> @intrinsic_vslideup_vi_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2043,7 +2043,7 @@ define <vscale x 4 x float> @intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2066,7 +2066,7 @@ define <vscale x 8 x float> @intrinsic_vslideup_vx_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 8 x float> @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 8 x float> @intrinsic_vslideup_vi_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2122,7 +2122,7 @@ define <vscale x 8 x float> @intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2145,7 +2145,7 @@ define <vscale x 1 x double> @intrinsic_vslideup_vx_nxv1f64_nxv1f64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2168,7 +2168,7 @@ define <vscale x 1 x double> @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x double> @intrinsic_vslideup_vi_nxv1f64_nxv1f64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2201,7 +2201,7 @@ define <vscale x 1 x double> @intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2224,7 +2224,7 @@ define <vscale x 2 x double> @intrinsic_vslideup_vx_nxv2f64_nxv2f64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2247,7 +2247,7 @@ define <vscale x 2 x double> @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2264,7 +2264,7 @@ define <vscale x 2 x double> @intrinsic_vslideup_vi_nxv2f64_nxv2f64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2280,7 +2280,7 @@ define <vscale x 2 x double> @intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2303,7 +2303,7 @@ define <vscale x 4 x double> @intrinsic_vslideup_vx_nxv4f64_nxv4f64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2326,7 +2326,7 @@ define <vscale x 4 x double> @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2343,7 +2343,7 @@ define <vscale x 4 x double> @intrinsic_vslideup_vi_nxv4f64_nxv4f64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2359,7 +2359,7 @@ define <vscale x 4 x double> @intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
     <vscale x 4 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
index 07e2286b2296..bb0c37dcc303 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@ define <vscale x 1 x i8> @intrinsic_vslideup_vi_nxv1i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@ define <vscale x 1 x i8> @intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -91,7 +91,7 @@ define <vscale x 2 x i8> @intrinsic_vslideup_vx_nxv2i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -114,7 +114,7 @@ define <vscale x 2 x i8> @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -131,7 +131,7 @@ define <vscale x 2 x i8> @intrinsic_vslideup_vi_nxv2i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -147,7 +147,7 @@ define <vscale x 2 x i8> @intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -170,7 +170,7 @@ define <vscale x 4 x i8> @intrinsic_vslideup_vx_nxv4i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -193,7 +193,7 @@ define <vscale x 4 x i8> @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -210,7 +210,7 @@ define <vscale x 4 x i8> @intrinsic_vslideup_vi_nxv4i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -226,7 +226,7 @@ define <vscale x 4 x i8> @intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -249,7 +249,7 @@ define <vscale x 8 x i8> @intrinsic_vslideup_vx_nxv8i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -272,7 +272,7 @@ define <vscale x 8 x i8> @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -289,7 +289,7 @@ define <vscale x 8 x i8> @intrinsic_vslideup_vi_nxv8i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -305,7 +305,7 @@ define <vscale x 8 x i8> @intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -328,7 +328,7 @@ define <vscale x 16 x i8> @intrinsic_vslideup_vx_nxv16i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -351,7 +351,7 @@ define <vscale x 16 x i8> @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -368,7 +368,7 @@ define <vscale x 16 x i8> @intrinsic_vslideup_vi_nxv16i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -384,7 +384,7 @@ define <vscale x 16 x i8> @intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -407,7 +407,7 @@ define <vscale x 32 x i8> @intrinsic_vslideup_vx_nxv32i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -430,7 +430,7 @@ define <vscale x 32 x i8> @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -447,7 +447,7 @@ define <vscale x 32 x i8> @intrinsic_vslideup_vi_nxv32i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -463,7 +463,7 @@ define <vscale x 32 x i8> @intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -486,7 +486,7 @@ define <vscale x 1 x i16> @intrinsic_vslideup_vx_nxv1i16_nxv1i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -509,7 +509,7 @@ define <vscale x 1 x i16> @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -526,7 +526,7 @@ define <vscale x 1 x i16> @intrinsic_vslideup_vi_nxv1i16_nxv1i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -542,7 +542,7 @@ define <vscale x 1 x i16> @intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -565,7 +565,7 @@ define <vscale x 2 x i16> @intrinsic_vslideup_vx_nxv2i16_nxv2i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -588,7 +588,7 @@ define <vscale x 2 x i16> @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -605,7 +605,7 @@ define <vscale x 2 x i16> @intrinsic_vslideup_vi_nxv2i16_nxv2i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -621,7 +621,7 @@ define <vscale x 2 x i16> @intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -644,7 +644,7 @@ define <vscale x 4 x i16> @intrinsic_vslideup_vx_nxv4i16_nxv4i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -667,7 +667,7 @@ define <vscale x 4 x i16> @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -684,7 +684,7 @@ define <vscale x 4 x i16> @intrinsic_vslideup_vi_nxv4i16_nxv4i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -700,7 +700,7 @@ define <vscale x 4 x i16> @intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -723,7 +723,7 @@ define <vscale x 8 x i16> @intrinsic_vslideup_vx_nxv8i16_nxv8i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -746,7 +746,7 @@ define <vscale x 8 x i16> @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -763,7 +763,7 @@ define <vscale x 8 x i16> @intrinsic_vslideup_vi_nxv8i16_nxv8i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -779,7 +779,7 @@ define <vscale x 8 x i16> @intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -802,7 +802,7 @@ define <vscale x 16 x i16> @intrinsic_vslideup_vx_nxv16i16_nxv16i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -825,7 +825,7 @@ define <vscale x 16 x i16> @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -842,7 +842,7 @@ define <vscale x 16 x i16> @intrinsic_vslideup_vi_nxv16i16_nxv16i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -858,7 +858,7 @@ define <vscale x 16 x i16> @intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -881,7 +881,7 @@ define <vscale x 1 x i32> @intrinsic_vslideup_vx_nxv1i32_nxv1i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -904,7 +904,7 @@ define <vscale x 1 x i32> @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -921,7 +921,7 @@ define <vscale x 1 x i32> @intrinsic_vslideup_vi_nxv1i32_nxv1i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -937,7 +937,7 @@ define <vscale x 1 x i32> @intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -960,7 +960,7 @@ define <vscale x 2 x i32> @intrinsic_vslideup_vx_nxv2i32_nxv2i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -983,7 +983,7 @@ define <vscale x 2 x i32> @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1000,7 +1000,7 @@ define <vscale x 2 x i32> @intrinsic_vslideup_vi_nxv2i32_nxv2i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1016,7 +1016,7 @@ define <vscale x 2 x i32> @intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1039,7 +1039,7 @@ define <vscale x 4 x i32> @intrinsic_vslideup_vx_nxv4i32_nxv4i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i32> @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1079,7 +1079,7 @@ define <vscale x 4 x i32> @intrinsic_vslideup_vi_nxv4i32_nxv4i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1095,7 +1095,7 @@ define <vscale x 4 x i32> @intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1118,7 +1118,7 @@ define <vscale x 8 x i32> @intrinsic_vslideup_vx_nxv8i32_nxv8i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1141,7 +1141,7 @@ define <vscale x 8 x i32> @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 8 x i32> @intrinsic_vslideup_vi_nxv8i32_nxv8i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1174,7 +1174,7 @@ define <vscale x 8 x i32> @intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1197,7 +1197,7 @@ define <vscale x 1 x i64> @intrinsic_vslideup_vx_nxv1i64_nxv1i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1220,7 +1220,7 @@ define <vscale x 1 x i64> @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1237,7 +1237,7 @@ define <vscale x 1 x i64> @intrinsic_vslideup_vi_nxv1i64_nxv1i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1253,7 +1253,7 @@ define <vscale x 1 x i64> @intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1276,7 +1276,7 @@ define <vscale x 2 x i64> @intrinsic_vslideup_vx_nxv2i64_nxv2i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1299,7 +1299,7 @@ define <vscale x 2 x i64> @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1316,7 +1316,7 @@ define <vscale x 2 x i64> @intrinsic_vslideup_vi_nxv2i64_nxv2i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1332,7 +1332,7 @@ define <vscale x 2 x i64> @intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1355,7 +1355,7 @@ define <vscale x 4 x i64> @intrinsic_vslideup_vx_nxv4i64_nxv4i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1378,7 +1378,7 @@ define <vscale x 4 x i64> @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1395,7 +1395,7 @@ define <vscale x 4 x i64> @intrinsic_vslideup_vi_nxv4i64_nxv4i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1411,7 +1411,7 @@ define <vscale x 4 x i64> @intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1434,7 +1434,7 @@ define <vscale x 1 x half> @intrinsic_vslideup_vx_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1457,7 +1457,7 @@ define <vscale x 1 x half> @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1474,7 +1474,7 @@ define <vscale x 1 x half> @intrinsic_vslideup_vi_nxv1f16_nxv1f16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1490,7 +1490,7 @@ define <vscale x 1 x half> @intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1513,7 +1513,7 @@ define <vscale x 2 x half> @intrinsic_vslideup_vx_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1536,7 +1536,7 @@ define <vscale x 2 x half> @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1553,7 +1553,7 @@ define <vscale x 2 x half> @intrinsic_vslideup_vi_nxv2f16_nxv2f16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1569,7 +1569,7 @@ define <vscale x 2 x half> @intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1592,7 +1592,7 @@ define <vscale x 4 x half> @intrinsic_vslideup_vx_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1615,7 +1615,7 @@ define <vscale x 4 x half> @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1632,7 +1632,7 @@ define <vscale x 4 x half> @intrinsic_vslideup_vi_nxv4f16_nxv4f16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1648,7 +1648,7 @@ define <vscale x 4 x half> @intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1671,7 +1671,7 @@ define <vscale x 8 x half> @intrinsic_vslideup_vx_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1694,7 +1694,7 @@ define <vscale x 8 x half> @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1711,7 +1711,7 @@ define <vscale x 8 x half> @intrinsic_vslideup_vi_nxv8f16_nxv8f16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1727,7 +1727,7 @@ define <vscale x 8 x half> @intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1750,7 +1750,7 @@ define <vscale x 16 x half> @intrinsic_vslideup_vx_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1773,7 +1773,7 @@ define <vscale x 16 x half> @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1790,7 +1790,7 @@ define <vscale x 16 x half> @intrinsic_vslideup_vi_nxv16f16_nxv16f16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1806,7 +1806,7 @@ define <vscale x 16 x half> @intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1829,7 +1829,7 @@ define <vscale x 1 x float> @intrinsic_vslideup_vx_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1852,7 +1852,7 @@ define <vscale x 1 x float> @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1869,7 +1869,7 @@ define <vscale x 1 x float> @intrinsic_vslideup_vi_nxv1f32_nxv1f32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 1 x float> @intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1908,7 +1908,7 @@ define <vscale x 2 x float> @intrinsic_vslideup_vx_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1931,7 +1931,7 @@ define <vscale x 2 x float> @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1948,7 +1948,7 @@ define <vscale x 2 x float> @intrinsic_vslideup_vi_nxv2f32_nxv2f32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1964,7 +1964,7 @@ define <vscale x 2 x float> @intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1987,7 +1987,7 @@ define <vscale x 4 x float> @intrinsic_vslideup_vx_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x float> @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2027,7 +2027,7 @@ define <vscale x 4 x float> @intrinsic_vslideup_vi_nxv4f32_nxv4f32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2043,7 +2043,7 @@ define <vscale x 4 x float> @intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2066,7 +2066,7 @@ define <vscale x 8 x float> @intrinsic_vslideup_vx_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 8 x float> @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 8 x float> @intrinsic_vslideup_vi_nxv8f32_nxv8f32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2122,7 +2122,7 @@ define <vscale x 8 x float> @intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2145,7 +2145,7 @@ define <vscale x 1 x double> @intrinsic_vslideup_vx_nxv1f64_nxv1f64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2168,7 +2168,7 @@ define <vscale x 1 x double> @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x double> @intrinsic_vslideup_vi_nxv1f64_nxv1f64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2201,7 +2201,7 @@ define <vscale x 1 x double> @intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2224,7 +2224,7 @@ define <vscale x 2 x double> @intrinsic_vslideup_vx_nxv2f64_nxv2f64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2247,7 +2247,7 @@ define <vscale x 2 x double> @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2264,7 +2264,7 @@ define <vscale x 2 x double> @intrinsic_vslideup_vi_nxv2f64_nxv2f64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2280,7 +2280,7 @@ define <vscale x 2 x double> @intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2303,7 +2303,7 @@ define <vscale x 4 x double> @intrinsic_vslideup_vx_nxv4f64_nxv4f64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2326,7 +2326,7 @@ define <vscale x 4 x double> @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2343,7 +2343,7 @@ define <vscale x 4 x double> @intrinsic_vslideup_vi_nxv4f64_nxv4f64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2359,7 +2359,7 @@ define <vscale x 4 x double> @intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
     <vscale x 4 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
index a85288a15b80..6a803ca44f85 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vsll_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_vx_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_vx_nxv32i16_nxv32i16(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_vx_nxv16i32_nxv16i32(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 64 x i8> @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 64 x i8> @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
index 9e6c6255f9a2..5826a78816b9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vsll_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_vx_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_vx_nxv32i16_nxv32i16(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_vx_nxv16i32_nxv16i32(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 2 x i8> @intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 4 x i8> @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 8 x i8> @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 16 x i8> @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 32 x i8> @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 64 x i8> @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 64 x i8> @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i16> @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i16> @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i16> @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i16> @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i16> @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@ define <vscale x 32 x i16> @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@ define <vscale x 1 x i32> @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@ define <vscale x 2 x i32> @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@ define <vscale x 4 x i32> @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@ define <vscale x 8 x i32> @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@ define <vscale x 16 x i32> @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@ define <vscale x 1 x i64> @intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@ define <vscale x 2 x i64> @intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@ define <vscale x 4 x i64> @intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 8 x i64> @intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
index 93d6a7223901..e0f63a7cb35f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vsmul.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vsmul.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vsmul.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
index 083b4d5670a5..c131a622b868 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll
index 142d74078926..44c020f457e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
@@ -12,7 +12,7 @@ define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@ define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@ define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@ define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -219,7 +219,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -242,7 +242,7 @@ define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -265,7 +265,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -288,7 +288,7 @@ define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -357,7 +357,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -380,7 +380,7 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -403,7 +403,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -426,7 +426,7 @@ define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@ define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -495,7 +495,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -518,7 +518,7 @@ define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -541,7 +541,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -564,7 +564,7 @@ define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -587,7 +587,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -610,7 +610,7 @@ define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -633,7 +633,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -656,7 +656,7 @@ define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -679,7 +679,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -702,7 +702,7 @@ define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -725,7 +725,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -748,7 +748,7 @@ define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -771,7 +771,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -794,7 +794,7 @@ define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -817,7 +817,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -840,7 +840,7 @@ define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -863,7 +863,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -886,7 +886,7 @@ define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -909,7 +909,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -932,7 +932,7 @@ define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -955,7 +955,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -978,7 +978,7 @@ define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -1001,7 +1001,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -1024,7 +1024,7 @@ define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1047,7 +1047,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1070,7 +1070,7 @@ define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1093,7 +1093,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1116,7 +1116,7 @@ define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1139,7 +1139,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1162,7 +1162,7 @@ define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1185,7 +1185,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1208,7 +1208,7 @@ define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1231,7 +1231,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1254,7 +1254,7 @@ define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1277,7 +1277,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1300,7 +1300,7 @@ define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1323,7 +1323,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1346,7 +1346,7 @@ define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1369,7 +1369,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1392,7 +1392,7 @@ define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1415,7 +1415,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1438,7 +1438,7 @@ define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1461,7 +1461,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1484,7 +1484,7 @@ define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1507,7 +1507,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1530,7 +1530,7 @@ define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1553,7 +1553,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1576,7 +1576,7 @@ define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1599,7 +1599,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1622,7 +1622,7 @@ define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1645,7 +1645,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1668,7 +1668,7 @@ define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1691,7 +1691,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1714,7 +1714,7 @@ define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1737,7 +1737,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1760,7 +1760,7 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1783,7 +1783,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1806,7 +1806,7 @@ define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1829,7 +1829,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1852,7 +1852,7 @@ define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1875,7 +1875,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1898,7 +1898,7 @@ define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1921,7 +1921,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1944,7 +1944,7 @@ define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1967,7 +1967,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1990,7 +1990,7 @@ define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -2013,7 +2013,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -2036,7 +2036,7 @@ define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2059,7 +2059,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2082,7 +2082,7 @@ define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2105,7 +2105,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2128,7 +2128,7 @@ define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2151,7 +2151,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2174,7 +2174,7 @@ define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2197,7 +2197,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2220,7 +2220,7 @@ define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2243,7 +2243,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2266,7 +2266,7 @@ define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2289,7 +2289,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2312,7 +2312,7 @@ define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2335,7 +2335,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2358,7 +2358,7 @@ define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2381,7 +2381,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2404,7 +2404,7 @@ define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2427,7 +2427,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2450,7 +2450,7 @@ define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2473,7 +2473,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2496,7 +2496,7 @@ define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2519,7 +2519,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2542,7 +2542,7 @@ define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2565,7 +2565,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2588,7 +2588,7 @@ define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2611,7 +2611,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2634,7 +2634,7 @@ define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2657,7 +2657,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2680,7 +2680,7 @@ define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2703,7 +2703,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2726,7 +2726,7 @@ define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2749,7 +2749,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2772,7 +2772,7 @@ define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2795,7 +2795,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2818,7 +2818,7 @@ define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2841,7 +2841,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2864,7 +2864,7 @@ define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2887,7 +2887,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2910,7 +2910,7 @@ define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2933,7 +2933,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2956,7 +2956,7 @@ define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2979,7 +2979,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -3002,7 +3002,7 @@ define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -3025,7 +3025,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -3048,7 +3048,7 @@ define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3071,7 +3071,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3094,7 +3094,7 @@ define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3117,7 +3117,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3140,7 +3140,7 @@ define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3163,7 +3163,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3186,7 +3186,7 @@ define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3209,7 +3209,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3232,7 +3232,7 @@ define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3255,7 +3255,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3278,7 +3278,7 @@ define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3301,7 +3301,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3324,7 +3324,7 @@ define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3347,7 +3347,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3370,7 +3370,7 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3393,7 +3393,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3416,7 +3416,7 @@ define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3439,7 +3439,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3462,7 +3462,7 @@ define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3485,7 +3485,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3508,7 +3508,7 @@ define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3531,7 +3531,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3554,7 +3554,7 @@ define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3577,7 +3577,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3600,7 +3600,7 @@ define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3623,7 +3623,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3646,7 +3646,7 @@ define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3669,7 +3669,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3692,7 +3692,7 @@ define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3715,7 +3715,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3738,7 +3738,7 @@ define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3761,7 +3761,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3784,7 +3784,7 @@ define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3807,7 +3807,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3830,7 +3830,7 @@ define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3853,7 +3853,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3876,7 +3876,7 @@ define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3899,7 +3899,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3922,7 +3922,7 @@ define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3945,7 +3945,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3968,7 +3968,7 @@ define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3991,7 +3991,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -4014,7 +4014,7 @@ define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -4037,7 +4037,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -4060,7 +4060,7 @@ define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -4083,7 +4083,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -4106,7 +4106,7 @@ define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4129,7 +4129,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4152,7 +4152,7 @@ define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4175,7 +4175,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4198,7 +4198,7 @@ define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4221,7 +4221,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4244,7 +4244,7 @@ define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4267,7 +4267,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4290,7 +4290,7 @@ define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4313,7 +4313,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4336,7 +4336,7 @@ define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4359,7 +4359,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4382,7 +4382,7 @@ define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4405,7 +4405,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4428,7 +4428,7 @@ define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4451,7 +4451,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4474,7 +4474,7 @@ define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4497,7 +4497,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4520,7 +4520,7 @@ define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4543,7 +4543,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4566,7 +4566,7 @@ define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4589,7 +4589,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4612,7 +4612,7 @@ define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4635,7 +4635,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4658,7 +4658,7 @@ define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4681,7 +4681,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4704,7 +4704,7 @@ define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4727,7 +4727,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4750,7 +4750,7 @@ define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4773,7 +4773,7 @@ define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4796,7 +4796,7 @@ define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4819,7 +4819,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4842,7 +4842,7 @@ define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4865,7 +4865,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4888,7 +4888,7 @@ define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4911,7 +4911,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4934,7 +4934,7 @@ define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4957,7 +4957,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4980,7 +4980,7 @@ define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -5003,7 +5003,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -5026,7 +5026,7 @@ define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -5049,7 +5049,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -5072,7 +5072,7 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -5095,7 +5095,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -5118,7 +5118,7 @@ define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5141,7 +5141,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5164,7 +5164,7 @@ define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5187,7 +5187,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5210,7 +5210,7 @@ define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5233,7 +5233,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5256,7 +5256,7 @@ define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5279,7 +5279,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5302,7 +5302,7 @@ define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5325,7 +5325,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5348,7 +5348,7 @@ define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5371,7 +5371,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5394,7 +5394,7 @@ define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5417,7 +5417,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5440,7 +5440,7 @@ define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5463,7 +5463,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5486,7 +5486,7 @@ define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5509,7 +5509,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5532,7 +5532,7 @@ define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5555,7 +5555,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5578,7 +5578,7 @@ define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5601,7 +5601,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5624,7 +5624,7 @@ define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5647,7 +5647,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5670,7 +5670,7 @@ define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5693,7 +5693,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5716,7 +5716,7 @@ define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5739,7 +5739,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5762,7 +5762,7 @@ define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5785,7 +5785,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5808,7 +5808,7 @@ define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5831,7 +5831,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5854,7 +5854,7 @@ define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5877,7 +5877,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5900,7 +5900,7 @@ define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5923,7 +5923,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5946,7 +5946,7 @@ define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5969,7 +5969,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5992,7 +5992,7 @@ define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -6015,7 +6015,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -6038,7 +6038,7 @@ define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -6061,7 +6061,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -6084,7 +6084,7 @@ define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -6107,7 +6107,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -6130,7 +6130,7 @@ define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
@@ -6153,7 +6153,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
index d8cc06fb6130..4a13cb311c3a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
@@ -12,7 +12,7 @@ define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@ define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@ define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@ define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -219,7 +219,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -242,7 +242,7 @@ define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -265,7 +265,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -288,7 +288,7 @@ define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -357,7 +357,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -380,7 +380,7 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -403,7 +403,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -426,7 +426,7 @@ define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@ define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -495,7 +495,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -518,7 +518,7 @@ define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -541,7 +541,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -564,7 +564,7 @@ define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -587,7 +587,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -610,7 +610,7 @@ define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -633,7 +633,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -656,7 +656,7 @@ define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -679,7 +679,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -702,7 +702,7 @@ define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -725,7 +725,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -748,7 +748,7 @@ define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -771,7 +771,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -794,7 +794,7 @@ define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -817,7 +817,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -840,7 +840,7 @@ define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -863,7 +863,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -886,7 +886,7 @@ define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -909,7 +909,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -932,7 +932,7 @@ define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -955,7 +955,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -978,7 +978,7 @@ define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -1001,7 +1001,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -1024,7 +1024,7 @@ define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1047,7 +1047,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1070,7 +1070,7 @@ define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1093,7 +1093,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1116,7 +1116,7 @@ define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1139,7 +1139,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1162,7 +1162,7 @@ define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1185,7 +1185,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1208,7 +1208,7 @@ define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1231,7 +1231,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1254,7 +1254,7 @@ define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1277,7 +1277,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1300,7 +1300,7 @@ define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1323,7 +1323,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1346,7 +1346,7 @@ define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1369,7 +1369,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1392,7 +1392,7 @@ define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1415,7 +1415,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1438,7 +1438,7 @@ define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1461,7 +1461,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1484,7 +1484,7 @@ define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1507,7 +1507,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1530,7 +1530,7 @@ define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1553,7 +1553,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1576,7 +1576,7 @@ define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1599,7 +1599,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1622,7 +1622,7 @@ define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1645,7 +1645,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1668,7 +1668,7 @@ define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1691,7 +1691,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1714,7 +1714,7 @@ define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1737,7 +1737,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1760,7 +1760,7 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1783,7 +1783,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1806,7 +1806,7 @@ define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1829,7 +1829,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1852,7 +1852,7 @@ define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1875,7 +1875,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1898,7 +1898,7 @@ define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1921,7 +1921,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1944,7 +1944,7 @@ define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1967,7 +1967,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1990,7 +1990,7 @@ define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -2013,7 +2013,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -2036,7 +2036,7 @@ define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2059,7 +2059,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2082,7 +2082,7 @@ define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2105,7 +2105,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2128,7 +2128,7 @@ define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2151,7 +2151,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2174,7 +2174,7 @@ define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2197,7 +2197,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2220,7 +2220,7 @@ define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2243,7 +2243,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2266,7 +2266,7 @@ define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2289,7 +2289,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2312,7 +2312,7 @@ define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2335,7 +2335,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2358,7 +2358,7 @@ define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2381,7 +2381,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2404,7 +2404,7 @@ define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2427,7 +2427,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2450,7 +2450,7 @@ define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2473,7 +2473,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2496,7 +2496,7 @@ define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2519,7 +2519,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2542,7 +2542,7 @@ define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2565,7 +2565,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2588,7 +2588,7 @@ define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2611,7 +2611,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2634,7 +2634,7 @@ define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2657,7 +2657,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2680,7 +2680,7 @@ define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2703,7 +2703,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2726,7 +2726,7 @@ define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2749,7 +2749,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2772,7 +2772,7 @@ define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2795,7 +2795,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2818,7 +2818,7 @@ define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2841,7 +2841,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2864,7 +2864,7 @@ define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2887,7 +2887,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2910,7 +2910,7 @@ define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2933,7 +2933,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2956,7 +2956,7 @@ define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2979,7 +2979,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -3002,7 +3002,7 @@ define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -3025,7 +3025,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -3048,7 +3048,7 @@ define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3071,7 +3071,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3094,7 +3094,7 @@ define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3117,7 +3117,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3140,7 +3140,7 @@ define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3163,7 +3163,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3186,7 +3186,7 @@ define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3209,7 +3209,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3232,7 +3232,7 @@ define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3255,7 +3255,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3278,7 +3278,7 @@ define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3301,7 +3301,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3324,7 +3324,7 @@ define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3347,7 +3347,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3370,7 +3370,7 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3393,7 +3393,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3416,7 +3416,7 @@ define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3439,7 +3439,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3462,7 +3462,7 @@ define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3485,7 +3485,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3508,7 +3508,7 @@ define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3531,7 +3531,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3554,7 +3554,7 @@ define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3577,7 +3577,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3600,7 +3600,7 @@ define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3623,7 +3623,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3646,7 +3646,7 @@ define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3669,7 +3669,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3692,7 +3692,7 @@ define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3715,7 +3715,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3738,7 +3738,7 @@ define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3761,7 +3761,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3784,7 +3784,7 @@ define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3807,7 +3807,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3830,7 +3830,7 @@ define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3853,7 +3853,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3876,7 +3876,7 @@ define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3899,7 +3899,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3922,7 +3922,7 @@ define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3945,7 +3945,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3968,7 +3968,7 @@ define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3991,7 +3991,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -4014,7 +4014,7 @@ define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -4037,7 +4037,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -4060,7 +4060,7 @@ define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -4083,7 +4083,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -4106,7 +4106,7 @@ define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4129,7 +4129,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4152,7 +4152,7 @@ define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4175,7 +4175,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4198,7 +4198,7 @@ define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4221,7 +4221,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4244,7 +4244,7 @@ define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4267,7 +4267,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4290,7 +4290,7 @@ define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4313,7 +4313,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4336,7 +4336,7 @@ define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4359,7 +4359,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4382,7 +4382,7 @@ define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4405,7 +4405,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4428,7 +4428,7 @@ define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4451,7 +4451,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4474,7 +4474,7 @@ define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4497,7 +4497,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4520,7 +4520,7 @@ define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4543,7 +4543,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4566,7 +4566,7 @@ define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4589,7 +4589,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4612,7 +4612,7 @@ define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4635,7 +4635,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4658,7 +4658,7 @@ define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4681,7 +4681,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4704,7 +4704,7 @@ define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4727,7 +4727,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4750,7 +4750,7 @@ define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4773,7 +4773,7 @@ define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4796,7 +4796,7 @@ define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4819,7 +4819,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4842,7 +4842,7 @@ define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4865,7 +4865,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4888,7 +4888,7 @@ define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4911,7 +4911,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4934,7 +4934,7 @@ define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4957,7 +4957,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4980,7 +4980,7 @@ define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -5003,7 +5003,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -5026,7 +5026,7 @@ define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -5049,7 +5049,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -5072,7 +5072,7 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -5095,7 +5095,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -5118,7 +5118,7 @@ define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5141,7 +5141,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5164,7 +5164,7 @@ define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5187,7 +5187,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5210,7 +5210,7 @@ define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5233,7 +5233,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5256,7 +5256,7 @@ define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5279,7 +5279,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5302,7 +5302,7 @@ define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5325,7 +5325,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5348,7 +5348,7 @@ define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5371,7 +5371,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5394,7 +5394,7 @@ define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5417,7 +5417,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5440,7 +5440,7 @@ define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5463,7 +5463,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5486,7 +5486,7 @@ define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5509,7 +5509,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5532,7 +5532,7 @@ define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5555,7 +5555,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5578,7 +5578,7 @@ define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5601,7 +5601,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5624,7 +5624,7 @@ define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5647,7 +5647,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5670,7 +5670,7 @@ define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5693,7 +5693,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5716,7 +5716,7 @@ define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5739,7 +5739,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5762,7 +5762,7 @@ define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5785,7 +5785,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5808,7 +5808,7 @@ define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5831,7 +5831,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5854,7 +5854,7 @@ define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5877,7 +5877,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5900,7 +5900,7 @@ define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5923,7 +5923,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5946,7 +5946,7 @@ define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5969,7 +5969,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5992,7 +5992,7 @@ define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -6015,7 +6015,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -6038,7 +6038,7 @@ define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -6061,7 +6061,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -6084,7 +6084,7 @@ define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -6107,7 +6107,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -6130,7 +6130,7 @@ define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
@@ -6153,7 +6153,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
index 3831cf7ef04c..7d11891ae6f9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vsra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_vx_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_vx_nxv32i16_nxv32i16(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_vx_nxv16i32_nxv16i32(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 64 x i8> @intrinsic_vsra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 64 x i8> @intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
index 3e078673e18c..38a78196b0d0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vsra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_vx_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_vx_nxv32i16_nxv32i16(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_vx_nxv16i32_nxv16i32(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 1 x i8> @intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 2 x i8> @intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 4 x i8> @intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 8 x i8> @intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 16 x i8> @intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 32 x i8> @intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 64 x i8> @intrinsic_vsra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 64 x i8> @intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i16> @intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i16> @intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i16> @intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i16> @intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i16> @intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@ define <vscale x 32 x i16> @intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@ define <vscale x 1 x i32> @intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@ define <vscale x 2 x i32> @intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@ define <vscale x 4 x i32> @intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@ define <vscale x 8 x i32> @intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@ define <vscale x 16 x i32> @intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@ define <vscale x 1 x i64> @intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@ define <vscale x 2 x i64> @intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@ define <vscale x 4 x i64> @intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 8 x i64> @intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
index 044a72950404..f7d931098a6b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vsrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_vx_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_vx_nxv32i16_nxv32i16(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_vx_nxv16i32_nxv16i32(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 64 x i8> @intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 64 x i8> @intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
index e32a30a12254..2d5113d2b281 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vsrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_vx_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_vx_nxv32i16_nxv32i16(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_vx_nxv16i32_nxv16i32(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 1 x i8> @intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 2 x i8> @intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 4 x i8> @intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 8 x i8> @intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 16 x i8> @intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 32 x i8> @intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 64 x i8> @intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 64 x i8> @intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i16> @intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i16> @intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i16> @intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i16> @intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i16> @intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@ define <vscale x 32 x i16> @intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@ define <vscale x 1 x i32> @intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@ define <vscale x 2 x i32> @intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@ define <vscale x 4 x i32> @intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@ define <vscale x 8 x i32> @intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@ define <vscale x 16 x i32> @intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@ define <vscale x 1 x i64> @intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@ define <vscale x 2 x i64> @intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@ define <vscale x 4 x i64> @intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 8 x i64> @intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll
index 69c4872564d9..ca2b8f6f1fd4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare void @llvm.riscv.vsse.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
@@ -12,7 +12,7 @@ define void @intrinsic_vsse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -35,7 +35,7 @@ define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -58,7 +58,7 @@ define void @intrinsic_vsse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -81,7 +81,7 @@ define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -104,7 +104,7 @@ define void @intrinsic_vsse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -127,7 +127,7 @@ define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -150,7 +150,7 @@ define void @intrinsic_vsse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -173,7 +173,7 @@ define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -196,7 +196,7 @@ define void @intrinsic_vsse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1f64(
     <vscale x 1 x double> %0,
@@ -219,7 +219,7 @@ define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -242,7 +242,7 @@ define void @intrinsic_vsse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2f64(
     <vscale x 2 x double> %0,
@@ -265,7 +265,7 @@ define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -288,7 +288,7 @@ define void @intrinsic_vsse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4f64(
     <vscale x 4 x double> %0,
@@ -311,7 +311,7 @@ define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -334,7 +334,7 @@ define void @intrinsic_vsse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8f64(
     <vscale x 8 x double> %0,
@@ -357,7 +357,7 @@ define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -380,7 +380,7 @@ define void @intrinsic_vsse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -403,7 +403,7 @@ define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -426,7 +426,7 @@ define void @intrinsic_vsse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@ define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@ define void @intrinsic_vsse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -495,7 +495,7 @@ define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -518,7 +518,7 @@ define void @intrinsic_vsse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -541,7 +541,7 @@ define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -564,7 +564,7 @@ define void @intrinsic_vsse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -587,7 +587,7 @@ define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -610,7 +610,7 @@ define void @intrinsic_vsse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1f32(
     <vscale x 1 x float> %0,
@@ -633,7 +633,7 @@ define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -656,7 +656,7 @@ define void @intrinsic_vsse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2f32(
     <vscale x 2 x float> %0,
@@ -679,7 +679,7 @@ define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -702,7 +702,7 @@ define void @intrinsic_vsse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4f32(
     <vscale x 4 x float> %0,
@@ -725,7 +725,7 @@ define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -748,7 +748,7 @@ define void @intrinsic_vsse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8f32(
     <vscale x 8 x float> %0,
@@ -771,7 +771,7 @@ define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -794,7 +794,7 @@ define void @intrinsic_vsse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16f32(
     <vscale x 16 x float> %0,
@@ -817,7 +817,7 @@ define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -840,7 +840,7 @@ define void @intrinsic_vsse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -863,7 +863,7 @@ define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -886,7 +886,7 @@ define void @intrinsic_vsse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -909,7 +909,7 @@ define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -932,7 +932,7 @@ define void @intrinsic_vsse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -955,7 +955,7 @@ define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -978,7 +978,7 @@ define void @intrinsic_vsse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1001,7 +1001,7 @@ define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1024,7 +1024,7 @@ define void @intrinsic_vsse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1047,7 +1047,7 @@ define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1070,7 +1070,7 @@ define void @intrinsic_vsse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1093,7 +1093,7 @@ define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1116,7 +1116,7 @@ define void @intrinsic_vsse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1139,7 +1139,7 @@ define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1162,7 +1162,7 @@ define void @intrinsic_vsse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1185,7 +1185,7 @@ define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1208,7 +1208,7 @@ define void @intrinsic_vsse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1231,7 +1231,7 @@ define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1254,7 +1254,7 @@ define void @intrinsic_vsse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1277,7 +1277,7 @@ define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1300,7 +1300,7 @@ define void @intrinsic_vsse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1323,7 +1323,7 @@ define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1346,7 +1346,7 @@ define void @intrinsic_vsse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1369,7 +1369,7 @@ define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1392,7 +1392,7 @@ define void @intrinsic_vsse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1415,7 +1415,7 @@ define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1438,7 +1438,7 @@ define void @intrinsic_vsse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1461,7 +1461,7 @@ define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1484,7 +1484,7 @@ define void @intrinsic_vsse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1507,7 +1507,7 @@ define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1530,7 +1530,7 @@ define void @intrinsic_vsse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1553,7 +1553,7 @@ define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1576,7 +1576,7 @@ define void @intrinsic_vsse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1599,7 +1599,7 @@ define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1622,7 +1622,7 @@ define void @intrinsic_vsse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1645,7 +1645,7 @@ define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1668,7 +1668,7 @@ define void @intrinsic_vsse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m8,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1691,7 +1691,7 @@ define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m8,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv64i8(
     <vscale x 64 x i8> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll
index c17030d6177e..546185686b7c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare void @llvm.riscv.vsse.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
@@ -12,7 +12,7 @@ define void @intrinsic_vsse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -35,7 +35,7 @@ define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -58,7 +58,7 @@ define void @intrinsic_vsse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -81,7 +81,7 @@ define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -104,7 +104,7 @@ define void @intrinsic_vsse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -127,7 +127,7 @@ define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -150,7 +150,7 @@ define void @intrinsic_vsse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -173,7 +173,7 @@ define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -196,7 +196,7 @@ define void @intrinsic_vsse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1f64(
     <vscale x 1 x double> %0,
@@ -219,7 +219,7 @@ define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -242,7 +242,7 @@ define void @intrinsic_vsse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2f64(
     <vscale x 2 x double> %0,
@@ -265,7 +265,7 @@ define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -288,7 +288,7 @@ define void @intrinsic_vsse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4f64(
     <vscale x 4 x double> %0,
@@ -311,7 +311,7 @@ define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -334,7 +334,7 @@ define void @intrinsic_vsse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8f64(
     <vscale x 8 x double> %0,
@@ -357,7 +357,7 @@ define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -380,7 +380,7 @@ define void @intrinsic_vsse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -403,7 +403,7 @@ define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -426,7 +426,7 @@ define void @intrinsic_vsse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@ define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@ define void @intrinsic_vsse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -495,7 +495,7 @@ define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -518,7 +518,7 @@ define void @intrinsic_vsse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -541,7 +541,7 @@ define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -564,7 +564,7 @@ define void @intrinsic_vsse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -587,7 +587,7 @@ define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -610,7 +610,7 @@ define void @intrinsic_vsse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1f32(
     <vscale x 1 x float> %0,
@@ -633,7 +633,7 @@ define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -656,7 +656,7 @@ define void @intrinsic_vsse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2f32(
     <vscale x 2 x float> %0,
@@ -679,7 +679,7 @@ define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -702,7 +702,7 @@ define void @intrinsic_vsse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4f32(
     <vscale x 4 x float> %0,
@@ -725,7 +725,7 @@ define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -748,7 +748,7 @@ define void @intrinsic_vsse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8f32(
     <vscale x 8 x float> %0,
@@ -771,7 +771,7 @@ define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -794,7 +794,7 @@ define void @intrinsic_vsse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16f32(
     <vscale x 16 x float> %0,
@@ -817,7 +817,7 @@ define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -840,7 +840,7 @@ define void @intrinsic_vsse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -863,7 +863,7 @@ define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -886,7 +886,7 @@ define void @intrinsic_vsse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -909,7 +909,7 @@ define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -932,7 +932,7 @@ define void @intrinsic_vsse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -955,7 +955,7 @@ define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -978,7 +978,7 @@ define void @intrinsic_vsse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1001,7 +1001,7 @@ define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1024,7 +1024,7 @@ define void @intrinsic_vsse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1047,7 +1047,7 @@ define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1070,7 +1070,7 @@ define void @intrinsic_vsse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1093,7 +1093,7 @@ define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1116,7 +1116,7 @@ define void @intrinsic_vsse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1139,7 +1139,7 @@ define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1162,7 +1162,7 @@ define void @intrinsic_vsse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1185,7 +1185,7 @@ define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1208,7 +1208,7 @@ define void @intrinsic_vsse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1231,7 +1231,7 @@ define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1254,7 +1254,7 @@ define void @intrinsic_vsse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1277,7 +1277,7 @@ define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1300,7 +1300,7 @@ define void @intrinsic_vsse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1323,7 +1323,7 @@ define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1346,7 +1346,7 @@ define void @intrinsic_vsse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1369,7 +1369,7 @@ define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1392,7 +1392,7 @@ define void @intrinsic_vsse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1415,7 +1415,7 @@ define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1438,7 +1438,7 @@ define void @intrinsic_vsse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1461,7 +1461,7 @@ define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1484,7 +1484,7 @@ define void @intrinsic_vsse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1507,7 +1507,7 @@ define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1530,7 +1530,7 @@ define void @intrinsic_vsse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1553,7 +1553,7 @@ define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1576,7 +1576,7 @@ define void @intrinsic_vsse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1599,7 +1599,7 @@ define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1622,7 +1622,7 @@ define void @intrinsic_vsse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1645,7 +1645,7 @@ define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1668,7 +1668,7 @@ define void @intrinsic_vsse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m8,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1691,7 +1691,7 @@ define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m8,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv64i8(
     <vscale x 64 x i8> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
index 945b72f957e3..bc769ae4c067 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -960,7 +960,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -982,7 +982,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1004,7 +1004,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1026,7 +1026,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1048,7 +1048,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 64 x i8> @intrinsic_vssra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 64 x i8> @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8(<vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1136,7 +1136,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1180,7 +1180,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1202,7 +1202,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1224,7 +1224,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1246,7 +1246,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1268,7 +1268,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1290,7 +1290,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_vx_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1312,7 +1312,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1334,7 +1334,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_vx_nxv32i16_nxv32i16(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1356,7 +1356,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1378,7 +1378,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1400,7 +1400,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1422,7 +1422,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1444,7 +1444,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1466,7 +1466,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1488,7 +1488,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1510,7 +1510,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1532,7 +1532,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1554,7 +1554,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_vx_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1576,7 +1576,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1598,7 +1598,7 @@ define <vscale x 1 x i64> @intrinsic_vssra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1620,7 +1620,7 @@ define <vscale x 1 x i64> @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1642,7 +1642,7 @@ define <vscale x 2 x i64> @intrinsic_vssra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1664,7 +1664,7 @@ define <vscale x 2 x i64> @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1686,7 +1686,7 @@ define <vscale x 4 x i64> @intrinsic_vssra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1708,7 +1708,7 @@ define <vscale x 4 x i64> @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1730,7 +1730,7 @@ define <vscale x 8 x i64> @intrinsic_vssra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1752,7 +1752,7 @@ define <vscale x 8 x i64> @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1769,7 +1769,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1784,7 +1784,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1801,7 +1801,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1816,7 +1816,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1833,7 +1833,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1848,7 +1848,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1865,7 +1865,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1880,7 +1880,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1897,7 +1897,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1912,7 +1912,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1944,7 +1944,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 64 x i8> @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1976,7 +1976,7 @@ define <vscale x 64 x i8> @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2008,7 +2008,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2040,7 +2040,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2072,7 +2072,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2104,7 +2104,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2136,7 +2136,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2168,7 +2168,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2200,7 +2200,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2232,7 +2232,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2264,7 +2264,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2328,7 +2328,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
     <vscale x 16 x i32> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
index a7ac6f16fd80..45e796706341 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vssra_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vssra_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vssra_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vssra_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vssra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8(<vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_vx_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_vx_nxv32i16_nxv32i16(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_vx_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vssra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vssra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vssra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vssra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 1 x i8> @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 2 x i8> @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 4 x i8> @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 8 x i8> @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 16 x i8> @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 32 x i8> @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 64 x i8> @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 64 x i8> @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i16> @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i16> @intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i16> @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i16> @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i16> @intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@ define <vscale x 32 x i16> @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@ define <vscale x 1 x i32> @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@ define <vscale x 2 x i32> @intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@ define <vscale x 4 x i32> @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@ define <vscale x 8 x i32> @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@ define <vscale x 16 x i32> @intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@ define <vscale x 1 x i64> @intrinsic_vssra_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@ define <vscale x 1 x i64> @intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@ define <vscale x 2 x i64> @intrinsic_vssra_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@ define <vscale x 2 x i64> @intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@ define <vscale x 4 x i64> @intrinsic_vssra_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@ define <vscale x 4 x i64> @intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i64> @intrinsic_vssra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 8 x i64> @intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
index 67b0e77e0ca6..fa80883b6ea0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -960,7 +960,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -982,7 +982,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1004,7 +1004,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1026,7 +1026,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1048,7 +1048,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 64 x i8> @intrinsic_vssrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 64 x i8> @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8(<vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1136,7 +1136,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1158,7 +1158,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1180,7 +1180,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1202,7 +1202,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1224,7 +1224,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1246,7 +1246,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1268,7 +1268,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1290,7 +1290,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_vx_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1312,7 +1312,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1334,7 +1334,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_vx_nxv32i16_nxv32i16(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1356,7 +1356,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1378,7 +1378,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1400,7 +1400,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1422,7 +1422,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1444,7 +1444,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1466,7 +1466,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1488,7 +1488,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1510,7 +1510,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1532,7 +1532,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1554,7 +1554,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_vx_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1576,7 +1576,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1598,7 +1598,7 @@ define <vscale x 1 x i64> @intrinsic_vssrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1620,7 +1620,7 @@ define <vscale x 1 x i64> @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1642,7 +1642,7 @@ define <vscale x 2 x i64> @intrinsic_vssrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1664,7 +1664,7 @@ define <vscale x 2 x i64> @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1686,7 +1686,7 @@ define <vscale x 4 x i64> @intrinsic_vssrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1708,7 +1708,7 @@ define <vscale x 4 x i64> @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1730,7 +1730,7 @@ define <vscale x 8 x i64> @intrinsic_vssrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1752,7 +1752,7 @@ define <vscale x 8 x i64> @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1769,7 +1769,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1784,7 +1784,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1801,7 +1801,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1816,7 +1816,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1833,7 +1833,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1848,7 +1848,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1865,7 +1865,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1880,7 +1880,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1897,7 +1897,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1912,7 +1912,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1944,7 +1944,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 64 x i8> @intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1976,7 +1976,7 @@ define <vscale x 64 x i8> @intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2008,7 +2008,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2040,7 +2040,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2072,7 +2072,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2104,7 +2104,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2136,7 +2136,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2168,7 +2168,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2200,7 +2200,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2232,7 +2232,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2264,7 +2264,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2328,7 +2328,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
index d6305809cb54..6c66ef515224 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vssrl_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vssrl_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vssrl_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vssrl_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vssrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8(<vscale x 64
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_vx_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_vx_nxv32i16_nxv32i16(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_vx_nxv16i32_nxv16i32(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vssrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vssrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vssrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vssrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 1 x i8> @intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 2 x i8> @intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 4 x i8> @intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 8 x i8> @intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 16 x i8> @intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 32 x i8> @intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 64 x i8> @intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 64 x i8> @intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i16> @intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i16> @intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i16> @intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i16> @intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i16> @intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@ define <vscale x 32 x i16> @intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@ define <vscale x 1 x i32> @intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@ define <vscale x 2 x i32> @intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@ define <vscale x 4 x i32> @intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@ define <vscale x 8 x i32> @intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@ define <vscale x 16 x i32> @intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@ define <vscale x 1 x i64> @intrinsic_vssrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@ define <vscale x 1 x i64> @intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@ define <vscale x 2 x i64> @intrinsic_vssrl_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@ define <vscale x 2 x i64> @intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@ define <vscale x 4 x i64> @intrinsic_vssrl_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@ define <vscale x 4 x i64> @intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i64> @intrinsic_vssrl_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 8 x i64> @intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
index 8b8df0481b67..69d60b322a04 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vssub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vssub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vssub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vssub.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vssub.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vssub.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
index b58822384bb1..70cc4caffead 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vssub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vssub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vssub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
index 4a9d28b1e8a3..7f08244401c1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsc
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vssubu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vssubu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vssubu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
index 05e4dc9d14de..ce3a09bf119f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsc
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
index 659d93cbf2d1..01ec91460d4c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vsub.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vsub.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vsub.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1998,7 +1998,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2013,7 +2013,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2030,7 +2030,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2045,7 +2045,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2062,7 +2062,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2077,7 +2077,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2094,7 +2094,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2109,7 +2109,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2126,7 +2126,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2141,7 +2141,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2158,7 +2158,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2173,7 +2173,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2190,7 +2190,7 @@ define <vscale x 64 x i8> @intrinsic_vsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2205,7 +2205,7 @@ define <vscale x 64 x i8> @intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2222,7 +2222,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2237,7 +2237,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2254,7 +2254,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2269,7 +2269,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2286,7 +2286,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2301,7 +2301,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2318,7 +2318,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2333,7 +2333,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2350,7 +2350,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2365,7 +2365,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2382,7 +2382,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2397,7 +2397,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2414,7 +2414,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2429,7 +2429,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2446,7 +2446,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2461,7 +2461,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2478,7 +2478,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2493,7 +2493,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2510,7 +2510,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2525,7 +2525,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2542,7 +2542,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2557,7 +2557,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2574,7 +2574,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2589,7 +2589,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2606,7 +2606,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2621,7 +2621,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2638,7 +2638,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2653,7 +2653,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2670,7 +2670,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2685,7 +2685,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
index d07fcc6c2411..ada9236d5dda 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 2 x i8> @intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 4 x i8> @intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 8 x i8> @intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 16 x i8> @intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 32 x i8> @intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 64 x i8> @intrinsic_vsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 64 x i8> @intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i16> @intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i16> @intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i16> @intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i16> @intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i16> @intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@ define <vscale x 32 x i16> @intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@ define <vscale x 1 x i32> @intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@ define <vscale x 2 x i32> @intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@ define <vscale x 4 x i32> @intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@ define <vscale x 8 x i32> @intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@ define <vscale x 16 x i32> @intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@ define <vscale x 1 x i64> @intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@ define <vscale x 2 x i64> @intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@ define <vscale x 4 x i64> @intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 8 x i64> @intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll
index 32de7854aef8..386175a0eb57 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
@@ -12,7 +12,7 @@ define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@ define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@ define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@ define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -219,7 +219,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -242,7 +242,7 @@ define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -265,7 +265,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -288,7 +288,7 @@ define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -357,7 +357,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -380,7 +380,7 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -403,7 +403,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -426,7 +426,7 @@ define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@ define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -495,7 +495,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -518,7 +518,7 @@ define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -541,7 +541,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -564,7 +564,7 @@ define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -587,7 +587,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -610,7 +610,7 @@ define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -633,7 +633,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -656,7 +656,7 @@ define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -679,7 +679,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -702,7 +702,7 @@ define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -725,7 +725,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -748,7 +748,7 @@ define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -771,7 +771,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -794,7 +794,7 @@ define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -817,7 +817,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -840,7 +840,7 @@ define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -863,7 +863,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -886,7 +886,7 @@ define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -909,7 +909,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -932,7 +932,7 @@ define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -955,7 +955,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -978,7 +978,7 @@ define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -1001,7 +1001,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -1024,7 +1024,7 @@ define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1047,7 +1047,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1070,7 +1070,7 @@ define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1093,7 +1093,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1116,7 +1116,7 @@ define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1139,7 +1139,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1162,7 +1162,7 @@ define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1185,7 +1185,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1208,7 +1208,7 @@ define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1231,7 +1231,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1254,7 +1254,7 @@ define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1277,7 +1277,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1300,7 +1300,7 @@ define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1323,7 +1323,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1346,7 +1346,7 @@ define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1369,7 +1369,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1392,7 +1392,7 @@ define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1415,7 +1415,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1438,7 +1438,7 @@ define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1461,7 +1461,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1484,7 +1484,7 @@ define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1507,7 +1507,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1530,7 +1530,7 @@ define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1553,7 +1553,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1576,7 +1576,7 @@ define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1599,7 +1599,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1622,7 +1622,7 @@ define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1645,7 +1645,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1668,7 +1668,7 @@ define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1691,7 +1691,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1714,7 +1714,7 @@ define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1737,7 +1737,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1760,7 +1760,7 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1783,7 +1783,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1806,7 +1806,7 @@ define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1829,7 +1829,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1852,7 +1852,7 @@ define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1875,7 +1875,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1898,7 +1898,7 @@ define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1921,7 +1921,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1944,7 +1944,7 @@ define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1967,7 +1967,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1990,7 +1990,7 @@ define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -2013,7 +2013,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -2036,7 +2036,7 @@ define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2059,7 +2059,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2082,7 +2082,7 @@ define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2105,7 +2105,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2128,7 +2128,7 @@ define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2151,7 +2151,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2174,7 +2174,7 @@ define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2197,7 +2197,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2220,7 +2220,7 @@ define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2243,7 +2243,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2266,7 +2266,7 @@ define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2289,7 +2289,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2312,7 +2312,7 @@ define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2335,7 +2335,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2358,7 +2358,7 @@ define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2381,7 +2381,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2404,7 +2404,7 @@ define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2427,7 +2427,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2450,7 +2450,7 @@ define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2473,7 +2473,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2496,7 +2496,7 @@ define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2519,7 +2519,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2542,7 +2542,7 @@ define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2565,7 +2565,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2588,7 +2588,7 @@ define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2611,7 +2611,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2634,7 +2634,7 @@ define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2657,7 +2657,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2680,7 +2680,7 @@ define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2703,7 +2703,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2726,7 +2726,7 @@ define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2749,7 +2749,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2772,7 +2772,7 @@ define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2795,7 +2795,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2818,7 +2818,7 @@ define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2841,7 +2841,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2864,7 +2864,7 @@ define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2887,7 +2887,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2910,7 +2910,7 @@ define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2933,7 +2933,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2956,7 +2956,7 @@ define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2979,7 +2979,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -3002,7 +3002,7 @@ define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -3025,7 +3025,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -3048,7 +3048,7 @@ define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3071,7 +3071,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3094,7 +3094,7 @@ define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3117,7 +3117,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3140,7 +3140,7 @@ define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3163,7 +3163,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3186,7 +3186,7 @@ define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3209,7 +3209,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3232,7 +3232,7 @@ define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3255,7 +3255,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3278,7 +3278,7 @@ define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3301,7 +3301,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3324,7 +3324,7 @@ define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3347,7 +3347,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3370,7 +3370,7 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3393,7 +3393,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3416,7 +3416,7 @@ define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3439,7 +3439,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3462,7 +3462,7 @@ define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3485,7 +3485,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3508,7 +3508,7 @@ define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3531,7 +3531,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3554,7 +3554,7 @@ define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3577,7 +3577,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3600,7 +3600,7 @@ define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3623,7 +3623,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3646,7 +3646,7 @@ define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3669,7 +3669,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3692,7 +3692,7 @@ define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3715,7 +3715,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3738,7 +3738,7 @@ define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3761,7 +3761,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3784,7 +3784,7 @@ define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3807,7 +3807,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3830,7 +3830,7 @@ define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3853,7 +3853,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3876,7 +3876,7 @@ define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3899,7 +3899,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3922,7 +3922,7 @@ define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3945,7 +3945,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3968,7 +3968,7 @@ define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3991,7 +3991,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -4014,7 +4014,7 @@ define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -4037,7 +4037,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -4060,7 +4060,7 @@ define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -4083,7 +4083,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -4106,7 +4106,7 @@ define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4129,7 +4129,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4152,7 +4152,7 @@ define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4175,7 +4175,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4198,7 +4198,7 @@ define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4221,7 +4221,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4244,7 +4244,7 @@ define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4267,7 +4267,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4290,7 +4290,7 @@ define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4313,7 +4313,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4336,7 +4336,7 @@ define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4359,7 +4359,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4382,7 +4382,7 @@ define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4405,7 +4405,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4428,7 +4428,7 @@ define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4451,7 +4451,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4474,7 +4474,7 @@ define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4497,7 +4497,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4520,7 +4520,7 @@ define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4543,7 +4543,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4566,7 +4566,7 @@ define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4589,7 +4589,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4612,7 +4612,7 @@ define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4635,7 +4635,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4658,7 +4658,7 @@ define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4681,7 +4681,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4704,7 +4704,7 @@ define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4727,7 +4727,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4750,7 +4750,7 @@ define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4773,7 +4773,7 @@ define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4796,7 +4796,7 @@ define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4819,7 +4819,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4842,7 +4842,7 @@ define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4865,7 +4865,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4888,7 +4888,7 @@ define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4911,7 +4911,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4934,7 +4934,7 @@ define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4957,7 +4957,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4980,7 +4980,7 @@ define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -5003,7 +5003,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -5026,7 +5026,7 @@ define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -5049,7 +5049,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -5072,7 +5072,7 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -5095,7 +5095,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -5118,7 +5118,7 @@ define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5141,7 +5141,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5164,7 +5164,7 @@ define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5187,7 +5187,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5210,7 +5210,7 @@ define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5233,7 +5233,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5256,7 +5256,7 @@ define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5279,7 +5279,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5302,7 +5302,7 @@ define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5325,7 +5325,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5348,7 +5348,7 @@ define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5371,7 +5371,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5394,7 +5394,7 @@ define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5417,7 +5417,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5440,7 +5440,7 @@ define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5463,7 +5463,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5486,7 +5486,7 @@ define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5509,7 +5509,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5532,7 +5532,7 @@ define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5555,7 +5555,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5578,7 +5578,7 @@ define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5601,7 +5601,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5624,7 +5624,7 @@ define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5647,7 +5647,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5670,7 +5670,7 @@ define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5693,7 +5693,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5716,7 +5716,7 @@ define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5739,7 +5739,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5762,7 +5762,7 @@ define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5785,7 +5785,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5808,7 +5808,7 @@ define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5831,7 +5831,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5854,7 +5854,7 @@ define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5877,7 +5877,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5900,7 +5900,7 @@ define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5923,7 +5923,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5946,7 +5946,7 @@ define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5969,7 +5969,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5992,7 +5992,7 @@ define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -6015,7 +6015,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -6038,7 +6038,7 @@ define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -6061,7 +6061,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -6084,7 +6084,7 @@ define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -6107,7 +6107,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -6130,7 +6130,7 @@ define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
@@ -6153,7 +6153,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
index 616c81b8b223..5099a4c01376 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
@@ -12,7 +12,7 @@ define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@ define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@ define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@ define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@ define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -219,7 +219,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -242,7 +242,7 @@ define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -265,7 +265,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -288,7 +288,7 @@ define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -357,7 +357,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -380,7 +380,7 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -403,7 +403,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -426,7 +426,7 @@ define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@ define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -495,7 +495,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -518,7 +518,7 @@ define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -541,7 +541,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -564,7 +564,7 @@ define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -587,7 +587,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -610,7 +610,7 @@ define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -633,7 +633,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -656,7 +656,7 @@ define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -679,7 +679,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -702,7 +702,7 @@ define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -725,7 +725,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -748,7 +748,7 @@ define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -771,7 +771,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -794,7 +794,7 @@ define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -817,7 +817,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -840,7 +840,7 @@ define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -863,7 +863,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -886,7 +886,7 @@ define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -909,7 +909,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -932,7 +932,7 @@ define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -955,7 +955,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -978,7 +978,7 @@ define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -1001,7 +1001,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -1024,7 +1024,7 @@ define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1047,7 +1047,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1070,7 +1070,7 @@ define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1093,7 +1093,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1116,7 +1116,7 @@ define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1139,7 +1139,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1162,7 +1162,7 @@ define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1185,7 +1185,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1208,7 +1208,7 @@ define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1231,7 +1231,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1254,7 +1254,7 @@ define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1277,7 +1277,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1300,7 +1300,7 @@ define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1323,7 +1323,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1346,7 +1346,7 @@ define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1369,7 +1369,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1392,7 +1392,7 @@ define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1415,7 +1415,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1438,7 +1438,7 @@ define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1461,7 +1461,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1484,7 +1484,7 @@ define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1507,7 +1507,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1530,7 +1530,7 @@ define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1553,7 +1553,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1576,7 +1576,7 @@ define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1599,7 +1599,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1622,7 +1622,7 @@ define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1645,7 +1645,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1668,7 +1668,7 @@ define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1691,7 +1691,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1714,7 +1714,7 @@ define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1737,7 +1737,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1760,7 +1760,7 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1783,7 +1783,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1806,7 +1806,7 @@ define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1829,7 +1829,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1852,7 +1852,7 @@ define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1875,7 +1875,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1898,7 +1898,7 @@ define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1921,7 +1921,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1944,7 +1944,7 @@ define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1967,7 +1967,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1990,7 +1990,7 @@ define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -2013,7 +2013,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -2036,7 +2036,7 @@ define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2059,7 +2059,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2082,7 +2082,7 @@ define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2105,7 +2105,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2128,7 +2128,7 @@ define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2151,7 +2151,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2174,7 +2174,7 @@ define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2197,7 +2197,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2220,7 +2220,7 @@ define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2243,7 +2243,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2266,7 +2266,7 @@ define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2289,7 +2289,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2312,7 +2312,7 @@ define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2335,7 +2335,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2358,7 +2358,7 @@ define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2381,7 +2381,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2404,7 +2404,7 @@ define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2427,7 +2427,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2450,7 +2450,7 @@ define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2473,7 +2473,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2496,7 +2496,7 @@ define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2519,7 +2519,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2542,7 +2542,7 @@ define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2565,7 +2565,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2588,7 +2588,7 @@ define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2611,7 +2611,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2634,7 +2634,7 @@ define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2657,7 +2657,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2680,7 +2680,7 @@ define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2703,7 +2703,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2726,7 +2726,7 @@ define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2749,7 +2749,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2772,7 +2772,7 @@ define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2795,7 +2795,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2818,7 +2818,7 @@ define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2841,7 +2841,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2864,7 +2864,7 @@ define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2887,7 +2887,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2910,7 +2910,7 @@ define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2933,7 +2933,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2956,7 +2956,7 @@ define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2979,7 +2979,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -3002,7 +3002,7 @@ define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -3025,7 +3025,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -3048,7 +3048,7 @@ define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3071,7 +3071,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3094,7 +3094,7 @@ define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3117,7 +3117,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3140,7 +3140,7 @@ define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3163,7 +3163,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3186,7 +3186,7 @@ define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3209,7 +3209,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3232,7 +3232,7 @@ define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3255,7 +3255,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3278,7 +3278,7 @@ define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3301,7 +3301,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3324,7 +3324,7 @@ define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3347,7 +3347,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3370,7 +3370,7 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3393,7 +3393,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3416,7 +3416,7 @@ define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3439,7 +3439,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3462,7 +3462,7 @@ define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3485,7 +3485,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3508,7 +3508,7 @@ define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3531,7 +3531,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3554,7 +3554,7 @@ define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3577,7 +3577,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3600,7 +3600,7 @@ define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3623,7 +3623,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3646,7 +3646,7 @@ define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3669,7 +3669,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3692,7 +3692,7 @@ define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3715,7 +3715,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3738,7 +3738,7 @@ define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3761,7 +3761,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3784,7 +3784,7 @@ define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3807,7 +3807,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3830,7 +3830,7 @@ define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3853,7 +3853,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3876,7 +3876,7 @@ define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3899,7 +3899,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3922,7 +3922,7 @@ define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3945,7 +3945,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3968,7 +3968,7 @@ define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3991,7 +3991,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -4014,7 +4014,7 @@ define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -4037,7 +4037,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x h
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -4060,7 +4060,7 @@ define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -4083,7 +4083,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -4106,7 +4106,7 @@ define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4129,7 +4129,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4152,7 +4152,7 @@ define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4175,7 +4175,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4198,7 +4198,7 @@ define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4221,7 +4221,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4244,7 +4244,7 @@ define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4267,7 +4267,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x f
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4290,7 +4290,7 @@ define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4313,7 +4313,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4336,7 +4336,7 @@ define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4359,7 +4359,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4382,7 +4382,7 @@ define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4405,7 +4405,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4428,7 +4428,7 @@ define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4451,7 +4451,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x doubl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4474,7 +4474,7 @@ define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4497,7 +4497,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4520,7 +4520,7 @@ define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4543,7 +4543,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4566,7 +4566,7 @@ define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4589,7 +4589,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4612,7 +4612,7 @@ define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4635,7 +4635,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4658,7 +4658,7 @@ define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4681,7 +4681,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4704,7 +4704,7 @@ define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4727,7 +4727,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4750,7 +4750,7 @@ define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4773,7 +4773,7 @@ define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4796,7 +4796,7 @@ define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4819,7 +4819,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4842,7 +4842,7 @@ define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4865,7 +4865,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4888,7 +4888,7 @@ define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4911,7 +4911,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4934,7 +4934,7 @@ define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4957,7 +4957,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4980,7 +4980,7 @@ define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -5003,7 +5003,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -5026,7 +5026,7 @@ define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -5049,7 +5049,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -5072,7 +5072,7 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -5095,7 +5095,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -5118,7 +5118,7 @@ define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5141,7 +5141,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5164,7 +5164,7 @@ define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5187,7 +5187,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5210,7 +5210,7 @@ define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5233,7 +5233,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5256,7 +5256,7 @@ define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5279,7 +5279,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5302,7 +5302,7 @@ define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5325,7 +5325,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5348,7 +5348,7 @@ define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5371,7 +5371,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5394,7 +5394,7 @@ define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5417,7 +5417,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5440,7 +5440,7 @@ define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5463,7 +5463,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5486,7 +5486,7 @@ define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5509,7 +5509,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5532,7 +5532,7 @@ define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5555,7 +5555,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5578,7 +5578,7 @@ define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5601,7 +5601,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5624,7 +5624,7 @@ define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5647,7 +5647,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5670,7 +5670,7 @@ define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5693,7 +5693,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5716,7 +5716,7 @@ define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5739,7 +5739,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x ha
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5762,7 +5762,7 @@ define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5785,7 +5785,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5808,7 +5808,7 @@ define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5831,7 +5831,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5854,7 +5854,7 @@ define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5877,7 +5877,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5900,7 +5900,7 @@ define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5923,7 +5923,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5946,7 +5946,7 @@ define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5969,7 +5969,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x fl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5992,7 +5992,7 @@ define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -6015,7 +6015,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -6038,7 +6038,7 @@ define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -6061,7 +6061,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -6084,7 +6084,7 @@ define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -6107,7 +6107,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -6130,7 +6130,7 @@ define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0,
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
@@ -6153,7 +6153,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
index 0ff06653b217..f6eda4930760 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
index 4e0111f3e387..b4840b4d5abe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
index 0fe3f0d2abeb..c7a215364dd9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -257,10 +257,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re8.v v28, (a0)
+; CHECK-NEXT:    vl4r.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -283,7 +283,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -305,7 +305,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -328,7 +328,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -350,7 +350,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -373,7 +373,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -395,7 +395,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -418,7 +418,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -440,7 +440,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -463,7 +463,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -486,7 +486,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -509,7 +509,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -531,7 +531,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -554,7 +554,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -576,7 +576,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -599,7 +599,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -621,7 +621,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -644,7 +644,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -667,7 +667,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vs
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -689,7 +689,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -711,7 +711,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -733,7 +733,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -755,7 +755,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -821,7 +821,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -843,7 +843,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -865,7 +865,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -887,7 +887,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -931,7 +931,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -953,7 +953,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -975,7 +975,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1063,7 +1063,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1107,7 +1107,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1129,7 +1129,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1151,7 +1151,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1173,7 +1173,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1217,7 +1217,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1261,7 +1261,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1283,7 +1283,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1305,7 +1305,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1327,7 +1327,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
index 2a86c6bd1dd9..3eb86f8a8a19 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -257,10 +257,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re8.v v28, (a0)
+; CHECK-NEXT:    vl4r.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -283,7 +283,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -305,7 +305,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -328,7 +328,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -350,7 +350,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -373,7 +373,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -395,7 +395,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -418,7 +418,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -440,7 +440,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -463,7 +463,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -486,7 +486,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -509,7 +509,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -531,7 +531,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -554,7 +554,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -576,7 +576,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -599,7 +599,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -621,7 +621,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -644,7 +644,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -667,7 +667,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vs
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -689,7 +689,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -711,7 +711,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -733,7 +733,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -755,7 +755,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -821,7 +821,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -843,7 +843,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -865,7 +865,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -887,7 +887,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -931,7 +931,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -953,7 +953,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -975,7 +975,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1063,7 +1063,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1107,7 +1107,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1129,7 +1129,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1151,7 +1151,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1173,7 +1173,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1217,7 +1217,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1261,7 +1261,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1283,7 +1283,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1305,7 +1305,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1327,7 +1327,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
index ec449f68f7c5..292c76b4de21 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
index dc150cb3b51e..1a4557b9b800 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
index 25304be95113..c582ab34384f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -257,10 +257,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re8.v v28, (a0)
+; CHECK-NEXT:    vl4r.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -283,7 +283,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -305,7 +305,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -328,7 +328,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -350,7 +350,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -373,7 +373,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -395,7 +395,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -418,7 +418,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -440,7 +440,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -463,7 +463,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -486,7 +486,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i1
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -509,7 +509,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -531,7 +531,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -554,7 +554,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -576,7 +576,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -599,7 +599,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -621,7 +621,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -644,7 +644,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -667,7 +667,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<v
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -689,7 +689,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -711,7 +711,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -733,7 +733,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -755,7 +755,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -821,7 +821,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -843,7 +843,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -865,7 +865,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -887,7 +887,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -931,7 +931,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -953,7 +953,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -975,7 +975,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1063,7 +1063,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1107,7 +1107,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1129,7 +1129,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1151,7 +1151,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1173,7 +1173,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1217,7 +1217,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1261,7 +1261,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1283,7 +1283,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1305,7 +1305,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1327,7 +1327,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

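For reference, a minimal sketch of the behaviour the updated checks rely on — this is a hypothetical standalone test, not part of this commit: with --riscv-no-aliases dropped, the instruction printer emits the "ret" alias instead of its canonical form "jalr zero, 0(ra)" (and, analogously, the whole-register load vl4re8.v prints as its shorter vl4r.v alias, as seen in the hunks above).

    ; A minimal sketch, assuming a hypothetical file not in this commit:
    ; without --riscv-no-aliases the return prints as "ret".
    ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s
    define void @returns_immediately() nounwind {
    ; CHECK-LABEL: returns_immediately:
    ; CHECK:       # %bb.0:
    ; CHECK-NEXT:    ret
      ret void
    }
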
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
index 8f370e407147..8588f3862dd4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -257,10 +257,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re8.v v28, (a0)
+; CHECK-NEXT:    vl4r.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -283,7 +283,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -305,7 +305,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -328,7 +328,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -350,7 +350,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -373,7 +373,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -395,7 +395,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -418,7 +418,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -440,7 +440,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -463,7 +463,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -486,7 +486,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i1
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -509,7 +509,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -531,7 +531,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -554,7 +554,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -576,7 +576,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -599,7 +599,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -621,7 +621,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -644,7 +644,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -667,7 +667,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<v
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -689,7 +689,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -711,7 +711,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -733,7 +733,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -755,7 +755,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -821,7 +821,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -843,7 +843,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -865,7 +865,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -887,7 +887,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -931,7 +931,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -953,7 +953,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -975,7 +975,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1063,7 +1063,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1107,7 +1107,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1129,7 +1129,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1151,7 +1151,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1173,7 +1173,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1217,7 +1217,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1261,7 +1261,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1283,7 +1283,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1305,7 +1305,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1327,7 +1327,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
index b06c422bcc34..127bb02f802c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i16> @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -794,7 +794,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -817,7 +817,7 @@ define <vscale x 4 x i16> @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -840,7 +840,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -863,7 +863,7 @@ define <vscale x 8 x i16> @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -886,7 +886,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@ define <vscale x 16 x i16> @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -932,7 +932,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -955,7 +955,7 @@ define <vscale x 32 x i16> @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -978,7 +978,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 1 x i32> @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 2 x i32> @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i32> @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1116,7 +1116,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1139,7 +1139,7 @@ define <vscale x 8 x i32> @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1162,7 +1162,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 16 x i32> @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 1 x i64> @intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 2 x i64> @intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1300,7 +1300,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i64> @intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1346,7 +1346,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1369,7 +1369,7 @@ define <vscale x 8 x i64> @intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
index 94773adf9527..32a0726a6fe1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i16> @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -794,7 +794,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -817,7 +817,7 @@ define <vscale x 4 x i16> @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -840,7 +840,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -863,7 +863,7 @@ define <vscale x 8 x i16> @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -886,7 +886,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@ define <vscale x 16 x i16> @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -932,7 +932,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -955,7 +955,7 @@ define <vscale x 32 x i16> @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -978,7 +978,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 1 x i32> @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 2 x i32> @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i32> @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1116,7 +1116,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1139,7 +1139,7 @@ define <vscale x 8 x i32> @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1162,7 +1162,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 16 x i32> @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 1 x i64> @intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 2 x i64> @intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1300,7 +1300,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i64> @intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1346,7 +1346,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1369,7 +1369,7 @@ define <vscale x 8 x i64> @intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
index da6af93aef8f..99980675ad9b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i16> @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i16> @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -794,7 +794,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -817,7 +817,7 @@ define <vscale x 4 x i16> @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -840,7 +840,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -863,7 +863,7 @@ define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -886,7 +886,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@ define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -932,7 +932,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -955,7 +955,7 @@ define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -978,7 +978,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1116,7 +1116,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1139,7 +1139,7 @@ define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1162,7 +1162,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 1 x i64> @intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 2 x i64> @intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1300,7 +1300,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i64> @intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1346,7 +1346,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1369,7 +1369,7 @@ define <vscale x 8 x i64> @intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
index 1a487997f37e..829fce1b272c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i16> @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i16> @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -794,7 +794,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -817,7 +817,7 @@ define <vscale x 4 x i16> @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -840,7 +840,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -863,7 +863,7 @@ define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -886,7 +886,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@ define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -932,7 +932,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -955,7 +955,7 @@ define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -978,7 +978,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1116,7 +1116,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1139,7 +1139,7 @@ define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1162,7 +1162,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 1 x i64> @intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 2 x i64> @intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1300,7 +1300,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i64> @intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1346,7 +1346,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1369,7 +1369,7 @@ define <vscale x 8 x i64> @intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
index 277595b1195f..406b21fcb47c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i16> @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -794,7 +794,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -817,7 +817,7 @@ define <vscale x 4 x i16> @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -840,7 +840,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -863,7 +863,7 @@ define <vscale x 8 x i16> @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -886,7 +886,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@ define <vscale x 16 x i16> @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -932,7 +932,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -955,7 +955,7 @@ define <vscale x 32 x i16> @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -978,7 +978,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 1 x i32> @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 2 x i32> @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i32> @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1116,7 +1116,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1139,7 +1139,7 @@ define <vscale x 8 x i32> @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1162,7 +1162,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 16 x i32> @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 1 x i64> @intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 2 x i64> @intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1300,7 +1300,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i64> @intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1346,7 +1346,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1369,7 +1369,7 @@ define <vscale x 8 x i64> @intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
index 382219875266..9e9951db9c88 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -748,7 +748,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -771,7 +771,7 @@ define <vscale x 2 x i16> @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -794,7 +794,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -817,7 +817,7 @@ define <vscale x 4 x i16> @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -840,7 +840,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -863,7 +863,7 @@ define <vscale x 8 x i16> @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -886,7 +886,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@ define <vscale x 16 x i16> @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -932,7 +932,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -955,7 +955,7 @@ define <vscale x 32 x i16> @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -978,7 +978,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1001,7 +1001,7 @@ define <vscale x 1 x i32> @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 2 x i32> @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1070,7 +1070,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i32> @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1116,7 +1116,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1139,7 +1139,7 @@ define <vscale x 8 x i32> @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1162,7 +1162,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1185,7 +1185,7 @@ define <vscale x 16 x i32> @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1208,7 +1208,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1231,7 +1231,7 @@ define <vscale x 1 x i64> @intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1254,7 +1254,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1277,7 +1277,7 @@ define <vscale x 2 x i64> @intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1300,7 +1300,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1323,7 +1323,7 @@ define <vscale x 4 x i64> @intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1346,7 +1346,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1369,7 +1369,7 @@ define <vscale x 8 x i64> @intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
index 8ccc25a49fbd..7d6f14ef3348 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
   <vscale x 1 x i16>,
   i8,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i16> @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x i16> @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i16> @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i16> @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x i16> @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x i16> @intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x i32> @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i32> @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i32> @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x i32> @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i32> @intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x i64> @intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i64> @intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x i64> @intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x i64> @intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
index 89fbadc8f9fd..4fcdd034db11 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
   <vscale x 1 x i16>,
   i8,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16>  @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -35,7 +35,7 @@ define <vscale x 1 x i16> @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -58,7 +58,7 @@ define <vscale x 2 x i16>  @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -81,7 +81,7 @@ define <vscale x 2 x i16> @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i16>  @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i16> @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@ define <vscale x 8 x i16>  @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -173,7 +173,7 @@ define <vscale x 8 x i16> @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -196,7 +196,7 @@ define <vscale x 16 x i16>  @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -219,7 +219,7 @@ define <vscale x 16 x i16> @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -242,7 +242,7 @@ define <vscale x 32 x i16>  @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -265,7 +265,7 @@ define <vscale x 32 x i16> @intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -288,7 +288,7 @@ define <vscale x 1 x i32>  @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -311,7 +311,7 @@ define <vscale x 1 x i32> @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i32>  @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i32> @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@ define <vscale x 4 x i32>  @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -403,7 +403,7 @@ define <vscale x 4 x i32> @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -426,7 +426,7 @@ define <vscale x 8 x i32>  @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -449,7 +449,7 @@ define <vscale x 8 x i32> @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -472,7 +472,7 @@ define <vscale x 16 x i32>  @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -495,7 +495,7 @@ define <vscale x 16 x i32> @intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -518,7 +518,7 @@ define <vscale x 1 x i64>  @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -541,7 +541,7 @@ define <vscale x 1 x i64> @intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i64>  @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i64> @intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -610,7 +610,7 @@ define <vscale x 4 x i64>  @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -633,7 +633,7 @@ define <vscale x 4 x i64> @intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -656,7 +656,7 @@ define <vscale x 8 x i64>  @intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -679,7 +679,7 @@ define <vscale x 8 x i64> @intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
index dc55be7dfe06..0969d4109f2b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i16> @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i32> @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i32> @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i32> @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i32> @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i32> @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i32> @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i32> @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i32> @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i64> @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i64> @intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i64> @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i64> @intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i64> @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i64> @intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i64> @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i64> @intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i16> @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i16> @intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i16> @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i16> @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i16> @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i16> @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i16> @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i16> @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i16> @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i16> @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i32> @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i32> @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i32> @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32> @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i32> @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i32> @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i32> @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i32> @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i32> @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i32> @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i64> @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i64> @intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i64> @intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i64> @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i64> @intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i64> @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i64> @intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
index 43aafadca8ba..6eca943b557d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i16> @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i32> @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i32> @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i32> @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i32> @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i32> @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i32> @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i32> @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i32> @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i64> @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i64> @intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i64> @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i64> @intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i64> @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i64> @intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i64> @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i64> @intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i16> @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i16> @intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i16> @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i16> @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i16> @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i16> @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i16> @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i16> @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i16> @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i16> @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i32> @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i32> @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i32> @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32> @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i32> @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i32> @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i32> @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i32> @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i32> @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i32> @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i64> @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i64> @intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i64> @intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i64> @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i64> @intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i64> @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i64> @intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
index 9728f7e197ea..7a3bd7498cb4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8(<vscale x 3
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
index 61b9a58c9d21..e24cec1f5624 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8(<vscale x 3
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32(<vscale x 2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32(<vscale x 4
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32(<vscale x 8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
index d2d4bd345333..1342e065ada8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
index 0d18165c585e..cdebffc7a273 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i16> @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i16> @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i16> @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i16> @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i16> @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i32> @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32> @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i32> @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i32> @intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i32> @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i64> @intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i64> @intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i64> @intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i64> @intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
index 2f8799ebedb7..e2dd028e6c23 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
   <vscale x 4 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
     <vscale x 4 x i16> %0,
@@ -35,7 +35,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -58,7 +58,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8(
     <vscale x 4 x i16> %0,
@@ -81,7 +81,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8(
     <vscale x 4 x i16> %0,
@@ -173,7 +173,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -196,7 +196,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8(
     <vscale x 4 x i16> %0,
@@ -219,7 +219,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -242,7 +242,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8(
     <vscale x 4 x i16> %0,
@@ -265,7 +265,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -403,7 +403,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -426,7 +426,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16(
     <vscale x 2 x i32> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -518,7 +518,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16(
     <vscale x 2 x i32> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -633,7 +633,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -656,7 +656,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32(
     <vscale x 1 x i64> %0,
@@ -679,7 +679,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32(
     <vscale x 1 x i64> %0,
@@ -771,7 +771,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.nxv1i64(
     <vscale x 1 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
index 3b81965d1aa4..2f436a345289 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
   <vscale x 4 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
     <vscale x 4 x i16> %0,
@@ -35,7 +35,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -58,7 +58,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8(
     <vscale x 4 x i16> %0,
@@ -81,7 +81,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8(
     <vscale x 4 x i16> %0,
@@ -173,7 +173,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -196,7 +196,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8(
     <vscale x 4 x i16> %0,
@@ -219,7 +219,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -242,7 +242,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8(
     <vscale x 4 x i16> %0,
@@ -265,7 +265,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -403,7 +403,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -426,7 +426,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16(
     <vscale x 2 x i32> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -518,7 +518,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16(
     <vscale x 2 x i32> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -633,7 +633,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -656,7 +656,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32(
     <vscale x 1 x i64> %0,
@@ -679,7 +679,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32(
     <vscale x 1 x i64> %0,
@@ -771,7 +771,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.nxv1i64(
     <vscale x 1 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
index 58946831966a..58e8c1811bcc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
   <vscale x 4 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
     <vscale x 4 x i16> %0,
@@ -35,7 +35,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -58,7 +58,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8(
     <vscale x 4 x i16> %0,
@@ -81,7 +81,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8(
     <vscale x 4 x i16> %0,
@@ -173,7 +173,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -196,7 +196,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8(
     <vscale x 4 x i16> %0,
@@ -219,7 +219,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -242,7 +242,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8(
     <vscale x 4 x i16> %0,
@@ -265,7 +265,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -403,7 +403,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -426,7 +426,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16(
     <vscale x 2 x i32> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -518,7 +518,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16(
     <vscale x 2 x i32> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -633,7 +633,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -656,7 +656,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32(
     <vscale x 1 x i64> %0,
@@ -679,7 +679,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32(
     <vscale x 1 x i64> %0,
@@ -771,7 +771,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.nxv1i64(
     <vscale x 1 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
index 9370d64dbd4a..9a80e4ee5916 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
   <vscale x 4 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
     <vscale x 4 x i16> %0,
@@ -35,7 +35,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -58,7 +58,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8(
     <vscale x 4 x i16> %0,
@@ -81,7 +81,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -104,7 +104,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8(
     <vscale x 4 x i16> %0,
@@ -173,7 +173,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -196,7 +196,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8(
     <vscale x 4 x i16> %0,
@@ -219,7 +219,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -242,7 +242,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8(
     <vscale x 4 x i16> %0,
@@ -265,7 +265,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -288,7 +288,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@ define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -403,7 +403,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -426,7 +426,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16(
     <vscale x 2 x i32> %0,
@@ -495,7 +495,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -518,7 +518,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16(
     <vscale x 2 x i32> %0,
@@ -541,7 +541,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -633,7 +633,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -656,7 +656,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32(
     <vscale x 1 x i64> %0,
@@ -679,7 +679,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -702,7 +702,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32(
     <vscale x 1 x i64> %0,
@@ -771,7 +771,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -794,7 +794,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.nxv1i64(
     <vscale x 1 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
index 3c5801d82d0f..57c9256aa405 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
index ec7545aba149..493e59775733 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16(<vscal
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16(<
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16(<vscale x 1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
index e7abd4fbf64f..b34905a90270 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -257,10 +257,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re8.v v28, (a0)
+; CHECK-NEXT:    vl4r.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -283,7 +283,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -305,7 +305,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -328,7 +328,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -350,7 +350,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -373,7 +373,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -395,7 +395,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -418,7 +418,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -440,7 +440,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -463,7 +463,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -486,7 +486,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -509,7 +509,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -531,7 +531,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -554,7 +554,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -576,7 +576,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -599,7 +599,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -621,7 +621,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -644,7 +644,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -667,7 +667,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vs
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -689,7 +689,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -711,7 +711,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -733,7 +733,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -755,7 +755,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -821,7 +821,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -843,7 +843,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -865,7 +865,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -887,7 +887,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -931,7 +931,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -953,7 +953,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -975,7 +975,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1063,7 +1063,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1107,7 +1107,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1129,7 +1129,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1151,7 +1151,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1173,7 +1173,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1217,7 +1217,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1261,7 +1261,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1283,7 +1283,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1305,7 +1305,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1327,7 +1327,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
index 3c233157ccd0..727c92ea82c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -257,10 +257,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re8.v v28, (a0)
+; CHECK-NEXT:    vl4r.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -283,7 +283,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -305,7 +305,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -328,7 +328,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -350,7 +350,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -373,7 +373,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -395,7 +395,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -418,7 +418,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -440,7 +440,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -463,7 +463,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -486,7 +486,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -509,7 +509,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -531,7 +531,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -554,7 +554,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -576,7 +576,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -599,7 +599,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -621,7 +621,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -644,7 +644,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -667,7 +667,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vs
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -689,7 +689,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -711,7 +711,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -733,7 +733,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -755,7 +755,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -821,7 +821,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -843,7 +843,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -865,7 +865,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -887,7 +887,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -931,7 +931,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -953,7 +953,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -975,7 +975,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1063,7 +1063,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1107,7 +1107,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1129,7 +1129,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1151,7 +1151,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1173,7 +1173,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1217,7 +1217,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1261,7 +1261,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1283,7 +1283,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1305,7 +1305,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1327,7 +1327,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
index 72aad4259045..3bb39c424247 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
index 298e3c894707..65dab019d5cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16(<vsca
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16(
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8(<vscale x 16
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8(<vscale x 32
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16(<vscale x
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16(<vsca
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
index 124229317e71..3b008ad38b33 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -257,10 +257,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re8.v v28, (a0)
+; CHECK-NEXT:    vl4r.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -283,7 +283,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -305,7 +305,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -328,7 +328,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -350,7 +350,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -373,7 +373,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -395,7 +395,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -418,7 +418,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -440,7 +440,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -463,7 +463,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -486,7 +486,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i1
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -509,7 +509,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -531,7 +531,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -554,7 +554,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -576,7 +576,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -599,7 +599,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -621,7 +621,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -644,7 +644,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -667,7 +667,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<v
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -689,7 +689,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -711,7 +711,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -733,7 +733,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -755,7 +755,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -821,7 +821,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -843,7 +843,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -865,7 +865,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -887,7 +887,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -931,7 +931,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -953,7 +953,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -975,7 +975,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1063,7 +1063,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1107,7 +1107,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1129,7 +1129,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1151,7 +1151,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1173,7 +1173,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1217,7 +1217,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1261,7 +1261,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1283,7 +1283,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1305,7 +1305,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1327,7 +1327,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
index 1e83630630c9..a778a4487bb8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vsc
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -257,10 +257,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
 define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re8.v v28, (a0)
+; CHECK-NEXT:    vl4r.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -283,7 +283,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -305,7 +305,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -328,7 +328,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -350,7 +350,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -373,7 +373,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -395,7 +395,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -418,7 +418,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -440,7 +440,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -463,7 +463,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vs
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -486,7 +486,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i1
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -509,7 +509,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -531,7 +531,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -554,7 +554,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -576,7 +576,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -599,7 +599,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -621,7 +621,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -644,7 +644,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -667,7 +667,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<v
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -689,7 +689,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -711,7 +711,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -733,7 +733,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -755,7 +755,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -821,7 +821,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -843,7 +843,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -865,7 +865,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -887,7 +887,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -931,7 +931,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8(<vsc
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -953,7 +953,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -975,7 +975,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -997,7 +997,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1019,7 +1019,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1041,7 +1041,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1063,7 +1063,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1085,7 +1085,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1107,7 +1107,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1129,7 +1129,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1151,7 +1151,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16(<vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1173,7 +1173,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1217,7 +1217,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1239,7 +1239,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1261,7 +1261,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1283,7 +1283,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1305,7 +1305,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1327,7 +1327,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
index 05073f5fe7d6..333e50f0280c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vxor.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vxor.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vxor.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1998,7 +1998,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2013,7 +2013,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2030,7 +2030,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2045,7 +2045,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2062,7 +2062,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2077,7 +2077,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2094,7 +2094,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2109,7 +2109,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2126,7 +2126,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2141,7 +2141,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2158,7 +2158,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2173,7 +2173,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2190,7 +2190,7 @@ define <vscale x 64 x i8> @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2205,7 +2205,7 @@ define <vscale x 64 x i8> @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2222,7 +2222,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2237,7 +2237,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2254,7 +2254,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2269,7 +2269,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2286,7 +2286,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2301,7 +2301,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2318,7 +2318,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2333,7 +2333,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2350,7 +2350,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2365,7 +2365,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2382,7 +2382,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2397,7 +2397,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2414,7 +2414,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2429,7 +2429,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2446,7 +2446,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2461,7 +2461,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2478,7 +2478,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2493,7 +2493,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2510,7 +2510,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2525,7 +2525,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2542,7 +2542,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2557,7 +2557,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2574,7 +2574,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2589,7 +2589,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2606,7 +2606,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2621,7 +2621,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2638,7 +2638,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2653,7 +2653,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2670,7 +2670,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2685,7 +2685,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
index 2024fafce7ee..3b150f05a9d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@ define <vscale x 64 x i8> @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<v
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<v
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscal
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscal
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@ define <vscale x 64 x i8> @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@ define <vscale x 1 x i8> @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@ define <vscale x 2 x i8> @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@ define <vscale x 4 x i8> @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@ define <vscale x 8 x i8> @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@ define <vscale x 16 x i8> @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@ define <vscale x 32 x i8> @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@ define <vscale x 64 x i8> @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@ define <vscale x 64 x i8> @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@ define <vscale x 1 x i16> @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@ define <vscale x 2 x i16> @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@ define <vscale x 4 x i16> @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@ define <vscale x 8 x i16> @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@ define <vscale x 16 x i16> @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16(<vscale x 32
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@ define <vscale x 32 x i16> @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@ define <vscale x 1 x i32> @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@ define <vscale x 2 x i32> @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@ define <vscale x 4 x i32> @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@ define <vscale x 8 x i32> @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32(<vscale x 16
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@ define <vscale x 16 x i32> @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@ define <vscale x 1 x i64> @intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@ define <vscale x 2 x i64> @intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@ define <vscale x 4 x i64> @intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@ define <vscale x 8 x i64> @intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll
index 9c3d47a36eb9..21a8b34f79d5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
   <vscale x 1 x i8>,
   i32);
@@ -11,7 +11,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_vf8_nxv1i64(<vscale x 1 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf8 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf8 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %1,
@@ -52,7 +52,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_vf8_nxv2i64(<vscale x 2 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf8 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_mask_vf8_nxv2i64(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf8 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %1,
@@ -93,7 +93,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_vf8_nxv4i64(<vscale x 4 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf8 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_mask_vf8_nxv4i64(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf8 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %1,
@@ -134,7 +134,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_vf8_nxv8i64(<vscale x 8 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf8 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_mask_vf8_nxv8i64(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf8 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %1,
@@ -175,7 +175,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_vf4_nxv1i64(<vscale x 1 x i16> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -195,7 +195,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %1,
@@ -216,7 +216,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_vf4_nxv2i64(<vscale x 2 x i16> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -236,7 +236,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_mask_vf4_nxv2i64(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %1,
@@ -257,7 +257,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_vf4_nxv4i64(<vscale x 4 x i16> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -277,7 +277,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_mask_vf4_nxv4i64(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %1,
@@ -298,7 +298,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_vf4_nxv8i64(<vscale x 8 x i16> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -318,7 +318,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_mask_vf4_nxv8i64(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %1,
@@ -339,7 +339,7 @@ define <vscale x 1 x i32> @intrinsic_vzext_vf4_nxv1i32(<vscale x 1 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -359,7 +359,7 @@ define <vscale x 1 x i32> @intrinsic_vzext_mask_vf4_nxv1i32(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %1,
@@ -380,7 +380,7 @@ define <vscale x 2 x i32> @intrinsic_vzext_vf4_nxv2i32(<vscale x 2 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -400,7 +400,7 @@ define <vscale x 2 x i32> @intrinsic_vzext_mask_vf4_nxv2i32(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %1,
@@ -421,7 +421,7 @@ define <vscale x 4 x i32> @intrinsic_vzext_vf4_nxv4i32(<vscale x 4 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -441,7 +441,7 @@ define <vscale x 4 x i32> @intrinsic_vzext_mask_vf4_nxv4i32(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %1,
@@ -462,7 +462,7 @@ define <vscale x 8 x i32> @intrinsic_vzext_vf4_nxv8i32(<vscale x 8 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -482,7 +482,7 @@ define <vscale x 8 x i32> @intrinsic_vzext_mask_vf4_nxv8i32(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %1,
@@ -503,7 +503,7 @@ define <vscale x 16 x i32> @intrinsic_vzext_vf4_nxv16i32(<vscale x 16 x i8> %0,
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -523,7 +523,7 @@ define <vscale x 16 x i32> @intrinsic_vzext_mask_vf4_nxv16i32(<vscale x 16 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %1,
@@ -544,7 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_vf2_nxv1i64(<vscale x 1 x i32> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %1,
@@ -585,7 +585,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_vf2_nxv2i64(<vscale x 2 x i32> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -605,7 +605,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_mask_vf2_nxv2i64(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %1,
@@ -626,7 +626,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_vf2_nxv4i64(<vscale x 4 x i32> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -646,7 +646,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_mask_vf2_nxv4i64(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %1,
@@ -667,7 +667,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_vf2_nxv8i64(<vscale x 8 x i32> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_mask_vf2_nxv8i64(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %1,
@@ -708,7 +708,7 @@ define <vscale x 1 x i32> @intrinsic_vzext_vf2_nxv1i32(<vscale x 1 x i16> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -728,7 +728,7 @@ define <vscale x 1 x i32> @intrinsic_vzext_mask_vf2_nxv1i32(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %1,
@@ -749,7 +749,7 @@ define <vscale x 2 x i32> @intrinsic_vzext_vf2_nxv2i32(<vscale x 2 x i16> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -769,7 +769,7 @@ define <vscale x 2 x i32> @intrinsic_vzext_mask_vf2_nxv2i32(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %1,
@@ -790,7 +790,7 @@ define <vscale x 4 x i32> @intrinsic_vzext_vf2_nxv4i32(<vscale x 4 x i16> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -810,7 +810,7 @@ define <vscale x 4 x i32> @intrinsic_vzext_mask_vf2_nxv4i32(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %1,
@@ -831,7 +831,7 @@ define <vscale x 8 x i32> @intrinsic_vzext_vf2_nxv8i32(<vscale x 8 x i16> %0, i3
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -851,7 +851,7 @@ define <vscale x 8 x i32> @intrinsic_vzext_mask_vf2_nxv8i32(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %1,
@@ -872,7 +872,7 @@ define <vscale x 16 x i32> @intrinsic_vzext_vf2_nxv16i32(<vscale x 16 x i16> %0,
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -892,7 +892,7 @@ define <vscale x 16 x i32> @intrinsic_vzext_mask_vf2_nxv16i32(<vscale x 16 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %1,
@@ -913,7 +913,7 @@ define <vscale x 1 x i16> @intrinsic_vzext_vf2_nxv1i16(<vscale x 1 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -933,7 +933,7 @@ define <vscale x 1 x i16> @intrinsic_vzext_mask_vf2_nxv1i16(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %1,
@@ -954,7 +954,7 @@ define <vscale x 2 x i16> @intrinsic_vzext_vf2_nxv2i16(<vscale x 2 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -974,7 +974,7 @@ define <vscale x 2 x i16> @intrinsic_vzext_mask_vf2_nxv2i16(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %1,
@@ -995,7 +995,7 @@ define <vscale x 4 x i16> @intrinsic_vzext_vf2_nxv4i16(<vscale x 4 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1015,7 +1015,7 @@ define <vscale x 4 x i16> @intrinsic_vzext_mask_vf2_nxv4i16(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %1,
@@ -1036,7 +1036,7 @@ define <vscale x 8 x i16> @intrinsic_vzext_vf2_nxv8i16(<vscale x 8 x i8> %0, i32
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1056,7 +1056,7 @@ define <vscale x 8 x i16> @intrinsic_vzext_mask_vf2_nxv8i16(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %1,
@@ -1077,7 +1077,7 @@ define <vscale x 16 x i16> @intrinsic_vzext_vf2_nxv16i16(<vscale x 16 x i8> %0,
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1097,7 +1097,7 @@ define <vscale x 16 x i16> @intrinsic_vzext_mask_vf2_nxv16i16(<vscale x 16 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %1,
@@ -1118,7 +1118,7 @@ define <vscale x 32 x i16> @intrinsic_vzext_vf2_nxv32i16(<vscale x 32 x i8> %0,
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1138,7 +1138,7 @@ define <vscale x 32 x i16> @intrinsic_vzext_mask_vf2_nxv32i16(<vscale x 32 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %1,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll
index 97c09dfd42f3..abfaa5ff7368 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
   <vscale x 1 x i8>,
   i64);
@@ -11,7 +11,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_vf8_nxv1i64(<vscale x 1 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf8 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf8 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %1,
@@ -52,7 +52,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_vf8_nxv2i64(<vscale x 2 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf8 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_mask_vf8_nxv2i64(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf8 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %1,
@@ -93,7 +93,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_vf8_nxv4i64(<vscale x 4 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf8 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -113,7 +113,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_mask_vf8_nxv4i64(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf8 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %1,
@@ -134,7 +134,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_vf8_nxv8i64(<vscale x 8 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf8 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -154,7 +154,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_mask_vf8_nxv8i64(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf8 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %1,
@@ -175,7 +175,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_vf4_nxv1i64(<vscale x 1 x i16> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -195,7 +195,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %1,
@@ -216,7 +216,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_vf4_nxv2i64(<vscale x 2 x i16> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -236,7 +236,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_mask_vf4_nxv2i64(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %1,
@@ -257,7 +257,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_vf4_nxv4i64(<vscale x 4 x i16> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -277,7 +277,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_mask_vf4_nxv4i64(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %1,
@@ -298,7 +298,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_vf4_nxv8i64(<vscale x 8 x i16> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -318,7 +318,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_mask_vf4_nxv8i64(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %1,
@@ -339,7 +339,7 @@ define <vscale x 1 x i32> @intrinsic_vzext_vf4_nxv1i32(<vscale x 1 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -359,7 +359,7 @@ define <vscale x 1 x i32> @intrinsic_vzext_mask_vf4_nxv1i32(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %1,
@@ -380,7 +380,7 @@ define <vscale x 2 x i32> @intrinsic_vzext_vf4_nxv2i32(<vscale x 2 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -400,7 +400,7 @@ define <vscale x 2 x i32> @intrinsic_vzext_mask_vf4_nxv2i32(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %1,
@@ -421,7 +421,7 @@ define <vscale x 4 x i32> @intrinsic_vzext_vf4_nxv4i32(<vscale x 4 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -441,7 +441,7 @@ define <vscale x 4 x i32> @intrinsic_vzext_mask_vf4_nxv4i32(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %1,
@@ -462,7 +462,7 @@ define <vscale x 8 x i32> @intrinsic_vzext_vf4_nxv8i32(<vscale x 8 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -482,7 +482,7 @@ define <vscale x 8 x i32> @intrinsic_vzext_mask_vf4_nxv8i32(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %1,
@@ -503,7 +503,7 @@ define <vscale x 16 x i32> @intrinsic_vzext_vf4_nxv16i32(<vscale x 16 x i8> %0,
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -523,7 +523,7 @@ define <vscale x 16 x i32> @intrinsic_vzext_mask_vf4_nxv16i32(<vscale x 16 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %1,
@@ -544,7 +544,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_vf2_nxv1i64(<vscale x 1 x i32> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %1,
@@ -585,7 +585,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_vf2_nxv2i64(<vscale x 2 x i32> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -605,7 +605,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_mask_vf2_nxv2i64(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %1,
@@ -626,7 +626,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_vf2_nxv4i64(<vscale x 4 x i32> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -646,7 +646,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_mask_vf2_nxv4i64(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %1,
@@ -667,7 +667,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_vf2_nxv8i64(<vscale x 8 x i32> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_mask_vf2_nxv8i64(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %1,
@@ -708,7 +708,7 @@ define <vscale x 1 x i32> @intrinsic_vzext_vf2_nxv1i32(<vscale x 1 x i16> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -728,7 +728,7 @@ define <vscale x 1 x i32> @intrinsic_vzext_mask_vf2_nxv1i32(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %1,
@@ -749,7 +749,7 @@ define <vscale x 2 x i32> @intrinsic_vzext_vf2_nxv2i32(<vscale x 2 x i16> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -769,7 +769,7 @@ define <vscale x 2 x i32> @intrinsic_vzext_mask_vf2_nxv2i32(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %1,
@@ -790,7 +790,7 @@ define <vscale x 4 x i32> @intrinsic_vzext_vf2_nxv4i32(<vscale x 4 x i16> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -810,7 +810,7 @@ define <vscale x 4 x i32> @intrinsic_vzext_mask_vf2_nxv4i32(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %1,
@@ -831,7 +831,7 @@ define <vscale x 8 x i32> @intrinsic_vzext_vf2_nxv8i32(<vscale x 8 x i16> %0, i6
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -851,7 +851,7 @@ define <vscale x 8 x i32> @intrinsic_vzext_mask_vf2_nxv8i32(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %1,
@@ -872,7 +872,7 @@ define <vscale x 16 x i32> @intrinsic_vzext_vf2_nxv16i32(<vscale x 16 x i16> %0,
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -892,7 +892,7 @@ define <vscale x 16 x i32> @intrinsic_vzext_mask_vf2_nxv16i32(<vscale x 16 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %1,
@@ -913,7 +913,7 @@ define <vscale x 1 x i16> @intrinsic_vzext_vf2_nxv1i16(<vscale x 1 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -933,7 +933,7 @@ define <vscale x 1 x i16> @intrinsic_vzext_mask_vf2_nxv1i16(<vscale x 1 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %1,
@@ -954,7 +954,7 @@ define <vscale x 2 x i16> @intrinsic_vzext_vf2_nxv2i16(<vscale x 2 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -974,7 +974,7 @@ define <vscale x 2 x i16> @intrinsic_vzext_mask_vf2_nxv2i16(<vscale x 2 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %1,
@@ -995,7 +995,7 @@ define <vscale x 4 x i16> @intrinsic_vzext_vf2_nxv4i16(<vscale x 4 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1015,7 +1015,7 @@ define <vscale x 4 x i16> @intrinsic_vzext_mask_vf2_nxv4i16(<vscale x 4 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %1,
@@ -1036,7 +1036,7 @@ define <vscale x 8 x i16> @intrinsic_vzext_vf2_nxv8i16(<vscale x 8 x i8> %0, i64
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1056,7 +1056,7 @@ define <vscale x 8 x i16> @intrinsic_vzext_mask_vf2_nxv8i16(<vscale x 8 x i1> %0
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %1,
@@ -1077,7 +1077,7 @@ define <vscale x 16 x i16> @intrinsic_vzext_vf2_nxv16i16(<vscale x 16 x i8> %0,
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1097,7 +1097,7 @@ define <vscale x 16 x i16> @intrinsic_vzext_mask_vf2_nxv16i16(<vscale x 16 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %1,
@@ -1118,7 +1118,7 @@ define <vscale x 32 x i16> @intrinsic_vzext_vf2_nxv32i16(<vscale x 32 x i8> %0,
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1138,7 +1138,7 @@ define <vscale x 32 x i16> @intrinsic_vzext_mask_vf2_nxv32i16(<vscale x 32 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %1,

diff  --git a/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll b/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
index bcfcec533114..516ffc0b6b00 100644
--- a/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
+++ b/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh \
-; RUN:   -verify-machineinstrs --riscv-no-aliases < %s \
+; RUN:   -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s
 
 declare half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half>)
@@ -24,7 +24,7 @@ define <vscale x 1 x half> @intrinsic_vfmv.f.s_s_nxv1f16(<vscale x 1 x half> %0,
 ; CHECK-NEXT:    flh ft0, 14(sp) # 2-byte Folded Reload
 ; CHECK-NEXT:    vfmv.v.f v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half> %0)
   tail call void asm sideeffect "", "~{f0_d},~{f1_d},~{f2_d},~{f3_d},~{f4_d},~{f5_d},~{f6_d},~{f7_d},~{f8_d},~{f9_d},~{f10_d},~{f11_d},~{f12_d},~{f13_d},~{f14_d},~{f15_d},~{f16_d},~{f17_d},~{f18_d},~{f19_d},~{f20_d},~{f21_d},~{f22_d},~{f23_d},~{f24_d},~{f25_d},~{f26_d},~{f27_d},~{f28_d},~{f29_d},~{f30_d},~{f31_d}"()
@@ -45,7 +45,7 @@ define <vscale x 1 x float> @intrinsic_vfmv.f.s_s_nxv1f32(<vscale x 1 x float> %
 ; CHECK-NEXT:    flw ft0, 12(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    vfmv.v.f v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float> %0)
   tail call void asm sideeffect "", "~{f0_d},~{f1_d},~{f2_d},~{f3_d},~{f4_d},~{f5_d},~{f6_d},~{f7_d},~{f8_d},~{f9_d},~{f10_d},~{f11_d},~{f12_d},~{f13_d},~{f14_d},~{f15_d},~{f16_d},~{f17_d},~{f18_d},~{f19_d},~{f20_d},~{f21_d},~{f22_d},~{f23_d},~{f24_d},~{f25_d},~{f26_d},~{f27_d},~{f28_d},~{f29_d},~{f30_d},~{f31_d}"()
@@ -66,7 +66,7 @@ define <vscale x 1 x double> @intrinsic_vfmv.f.s_s_nxv1f64(<vscale x 1 x double>
 ; CHECK-NEXT:    fld ft0, 8(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    vfmv.v.f v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double> %0)
   tail call void asm sideeffect "", "~{f0_d},~{f1_d},~{f2_d},~{f3_d},~{f4_d},~{f5_d},~{f6_d},~{f7_d},~{f8_d},~{f9_d},~{f10_d},~{f11_d},~{f12_d},~{f13_d},~{f14_d},~{f15_d},~{f16_d},~{f17_d},~{f18_d},~{f19_d},~{f20_d},~{f21_d},~{f22_d},~{f23_d},~{f24_d},~{f25_d},~{f26_d},~{f27_d},~{f28_d},~{f29_d},~{f30_d},~{f31_d}"()
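
For reference, ret is the standard assembler alias for jalr zero, 0(ra), so the flag only changes how the instruction is printed, never which instruction is emitted. A minimal sketch of a standalone test exercising both spellings (illustrative only, not part of this commit; the file name, function name and NOALIAS check prefix are made up):

; ret-alias.ll - illustrative sketch, not part of this commit
; RUN: llc -mtriple=riscv64 < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 --riscv-no-aliases < %s \
; RUN:   | FileCheck %s --check-prefix=NOALIAS

define void @empty() {
; CHECK-LABEL: empty:
; CHECK:         ret
; NOALIAS-LABEL: empty:
; NOALIAS:         jalr zero, 0(ra)
entry:
  ret void
}

Either way the same instruction is encoded; only the textual rendering differs, which is why dropping --riscv-no-aliases from the RUN lines is purely a test-readability cleanup.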


        

